author     Eric Paris <eparis@redhat.com>    2014-03-07 11:41:32 -0500
committer  Eric Paris <eparis@redhat.com>    2014-03-07 11:41:32 -0500
commit     b7d3622a39fde7658170b7f3cf6c6889bb8db30d
tree       64f4e781ecb2a85d675e234072b988560bcd25f1 /include/linux
parent     f3411cb2b2e396a41ed3a439863f028db7140a34
parent     d8ec26d7f8287f5788a494f56e8814210f0e64be
Merge tag 'v3.13' into for-3.15
Linux 3.13

Conflicts:
	include/net/xfrm.h

Simple merge where v3.13 removed 'extern' from definitions and the audit tree
did s/u32/unsigned int/ to the same definitions.
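As a rough sketch of that conflict pattern (the function name below is hypothetical; the real prototypes are in include/net/xfrm.h), both sides touch the same declaration and the resolution simply takes both changes:

/*
 * Illustration only: a made-up xfrm-style prototype showing how the two
 * sides of the conflict combine.  Compiles as a standalone C file.
 */
typedef unsigned int u32;                        /* stand-in for the kernel's u32 */

extern u32 xfrm_example_spi_hash(void);          /* common ancestor               */
u32 xfrm_example_spi_hash(void);                 /* v3.13: 'extern' dropped       */
extern unsigned int xfrm_example_spi_hash(void); /* audit: s/u32/unsigned int/    */
unsigned int xfrm_example_spi_hash(void);        /* merge result: both changes    */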
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h107
-rw-r--r--include/linux/acpi_gpio.h31
-rw-r--r--include/linux/amba/bus.h4
-rw-r--r--include/linux/amba/serial.h2
-rw-r--r--include/linux/anon_inodes.h3
-rw-r--r--include/linux/assoc_array.h92
-rw-r--r--include/linux/assoc_array_priv.h182
-rw-r--r--include/linux/ata.h7
-rw-r--r--include/linux/atmel_serial.h1
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/backing-dev.h4
-rw-r--r--include/linux/backlight.h4
-rw-r--r--include/linux/binfmts.h6
-rw-r--r--include/linux/bio.h3
-rw-r--r--include/linux/bitops.h11
-rw-r--r--include/linux/blk-mq.h183
-rw-r--r--include/linux/blk_types.h68
-rw-r--r--include/linux/blkdev.h63
-rw-r--r--include/linux/blktrace_api.h4
-rw-r--r--include/linux/cgroup.h37
-rw-r--r--include/linux/clk-provider.h1
-rw-r--r--include/linux/clk/mxs.h2
-rw-r--r--include/linux/clk/sunxi.h22
-rw-r--r--include/linux/clockchips.h1
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cmdline-parser.h2
-rw-r--r--include/linux/compat.h6
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/completion.h30
-rw-r--r--include/linux/coredump.h10
-rw-r--r--include/linux/cper.h13
-rw-r--r--include/linux/cpu.h16
-rw-r--r--include/linux/cpufreq.h83
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/cpuset.h4
-rw-r--r--include/linux/crash_dump.h2
-rw-r--r--include/linux/crc32.h40
-rw-r--r--include/linux/dcache.h106
-rw-r--r--include/linux/debugfs.h12
-rw-r--r--include/linux/devfreq.h8
-rw-r--r--include/linux/device.h76
-rw-r--r--include/linux/dma-mapping.h31
-rw-r--r--include/linux/dmaengine.h76
-rw-r--r--include/linux/dmi.h5
-rw-r--r--include/linux/edac.h2
-rw-r--r--include/linux/efi.h62
-rw-r--r--include/linux/elf.h6
-rw-r--r--include/linux/elfcore.h7
-rw-r--r--include/linux/etherdevice.h35
-rw-r--r--include/linux/export.h4
-rw-r--r--include/linux/extcon.h72
-rw-r--r--include/linux/extcon/extcon-adc-jack.h42
-rw-r--r--include/linux/extcon/extcon-gpio.h20
-rw-r--r--include/linux/fb.h12
-rw-r--r--include/linux/fcdevice.h2
-rw-r--r--include/linux/fddidevice.h7
-rw-r--r--include/linux/fs.h113
-rw-r--r--include/linux/fscache-cache.h50
-rw-r--r--include/linux/fscache.h113
-rw-r--r--include/linux/ftrace.h5
-rw-r--r--include/linux/ftrace_event.h41
-rw-r--r--include/linux/genalloc.h2
-rw-r--r--include/linux/genl_magic_func.h53
-rw-r--r--include/linux/gpio.h67
-rw-r--r--include/linux/gpio/consumer.h253
-rw-r--r--include/linux/gpio/driver.h194
-rw-r--r--include/linux/hardirq.h8
-rw-r--r--include/linux/hashtable.h15
-rw-r--r--include/linux/hid-sensor-hub.h23
-rw-r--r--include/linux/hid-sensor-ids.h12
-rw-r--r--include/linux/hippidevice.h10
-rw-r--r--include/linux/host1x.h284
-rw-r--r--include/linux/huge_mm.h17
-rw-r--r--include/linux/hugetlb.h41
-rw-r--r--include/linux/hwmon-vid.h2
-rw-r--r--include/linux/hwmon.h10
-rw-r--r--include/linux/hyperv.h37
-rw-r--r--include/linux/i2c.h4
-rw-r--r--include/linux/i2c/twl.h2
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/ieee80211.h24
-rw-r--r--include/linux/if_macvlan.h18
-rw-r--r--include/linux/if_vlan.h101
-rw-r--r--include/linux/iio/buffer.h61
-rw-r--r--include/linux/iio/common/st_sensors.h6
-rw-r--r--include/linux/iio/consumer.h2
-rw-r--r--include/linux/iio/events.h14
-rw-r--r--include/linux/iio/iio.h93
-rw-r--r--include/linux/iio/sysfs.h15
-rw-r--r--include/linux/iio/types.h20
-rw-r--r--include/linux/inetdevice.h28
-rw-r--r--include/linux/init.h6
-rw-r--r--include/linux/init_task.h8
-rw-r--r--include/linux/interrupt.h33
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/ipv6.h77
-rw-r--r--include/linux/irq.h7
-rw-r--r--include/linux/irqchip/arm-gic.h7
-rw-r--r--include/linux/irqreturn.h2
-rw-r--r--include/linux/jump_label.h14
-rw-r--r--include/linux/jump_label_ratelimit.h2
-rw-r--r--include/linux/kdb.h1
-rw-r--r--include/linux/kernel-page-flags.h1
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/key-type.h6
-rw-r--r--include/linux/key.h52
-rw-r--r--include/linux/kfifo.h47
-rw-r--r--include/linux/kgdb.h1
-rw-r--r--include/linux/kobj_completion.h18
-rw-r--r--include/linux/kobject.h1
-rw-r--r--include/linux/kvm_host.h42
-rw-r--r--include/linux/lglock.h10
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/list.h79
-rw-r--r--include/linux/llist.h2
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/lockref.h13
-rw-r--r--include/linux/math64.h30
-rw-r--r--include/linux/memblock.h24
-rw-r--r--include/linux/memory_hotplug.h11
-rw-r--r--include/linux/mempolicy.h6
-rw-r--r--include/linux/mfd/arizona/registers.h2
-rw-r--r--include/linux/mfd/as3722.h423
-rw-r--r--include/linux/mfd/core.h8
-rw-r--r--include/linux/mfd/da9052/da9052.h20
-rw-r--r--include/linux/mfd/dbx500-prcmu.h70
-rw-r--r--include/linux/mfd/max77693-private.h1
-rw-r--r--include/linux/mfd/max77693.h2
-rw-r--r--include/linux/mfd/mc13xxx.h7
-rw-r--r--include/linux/mfd/rtsx_pci.h53
-rw-r--r--include/linux/mfd/samsung/core.h4
-rw-r--r--include/linux/mfd/samsung/rtc.h11
-rw-r--r--include/linux/mfd/si476x-core.h2
-rw-r--r--include/linux/mfd/stw481x.h56
-rw-r--r--include/linux/mfd/syscon.h25
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h13
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h29
-rw-r--r--include/linux/mfd/wm8994/core.h47
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/migrate.h19
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx4/cmd.h6
-rw-r--r--include/linux/mlx4/device.h25
-rw-r--r--include/linux/mlx5/device.h13
-rw-r--r--include/linux/mlx5/driver.h18
-rw-r--r--include/linux/mm.h266
-rw-r--r--include/linux/mm_types.h108
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmc/card.h7
-rw-r--r--include/linux/mmc/core.h4
-rw-r--r--include/linux/mmc/dw_mmc.h4
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/module.h3
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/msg.h6
-rw-r--r--include/linux/msi.h12
-rw-r--r--include/linux/mtd/bbm.h2
-rw-r--r--include/linux/mtd/map.h4
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/mtd/nand.h16
-rw-r--r--include/linux/mutex.h2
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/net.h119
-rw-r--r--include/linux/netdev_features.h6
-rw-r--r--include/linux/netdevice.h548
-rw-r--r--include/linux/netfilter.h24
-rw-r--r--include/linux/netfilter/ipset/ip_set.h161
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h57
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h14
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h162
-rw-r--r--include/linux/netfilter/nfnetlink.h29
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h6
-rw-r--r--include/linux/netfilter/x_tables.h128
-rw-r--r--include/linux/netfilter_bridge.h4
-rw-r--r--include/linux/netfilter_ipv4.h6
-rw-r--r--include/linux/netfilter_ipv6.h10
-rw-r--r--include/linux/netpoll.h5
-rw-r--r--include/linux/nfs4.h18
-rw-r--r--include/linux/nfs_fs.h26
-rw-r--r--include/linux/nfs_fs_sb.h10
-rw-r--r--include/linux/nfs_xdr.h24
-rw-r--r--include/linux/of.h35
-rw-r--r--include/linux/of_address.h39
-rw-r--r--include/linux/of_fdt.h19
-rw-r--r--include/linux/of_gpio.h29
-rw-r--r--include/linux/of_irq.h47
-rw-r--r--include/linux/of_mtd.h21
-rw-r--r--include/linux/of_pci.h5
-rw-r--r--include/linux/oom.h5
-rw-r--r--include/linux/opp.h134
-rw-r--r--include/linux/padata.h3
-rw-r--r--include/linux/page-flags-layout.h28
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/pci-acpi.h4
-rw-r--r--include/linux/pci.h105
-rw-r--r--include/linux/pci_hotplug.h5
-rw-r--r--include/linux/pcieport_if.h2
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/percpu.h32
-rw-r--r--include/linux/percpu_ida.h23
-rw-r--r--include/linux/perf_event.h5
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/phy/phy.h270
-rw-r--r--include/linux/pid_namespace.h1
-rw-r--r--include/linux/pinctrl/pinctrl.h3
-rw-r--r--include/linux/platform_data/at24.h (renamed from include/linux/i2c/at24.h)2
-rw-r--r--include/linux/platform_data/clk-nomadik.h2
-rw-r--r--include/linux/platform_data/clk-ux500.h3
-rw-r--r--include/linux/platform_data/davinci_asp.h2
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h46
-rw-r--r--include/linux/platform_data/edma.h8
-rw-r--r--include/linux/platform_data/gpio-davinci.h60
-rw-r--r--include/linux/platform_data/leds-lp55xx.h7
-rw-r--r--include/linux/platform_data/leds-pca9685.h35
-rw-r--r--include/linux/platform_data/lm3630_bl.h57
-rw-r--r--include/linux/platform_data/lm3630a_bl.h65
-rw-r--r--include/linux/platform_data/lp855x.h19
-rw-r--r--include/linux/platform_data/mipi-csis.h9
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h5
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h18
-rw-r--r--include/linux/platform_data/pinctrl-adi2.h40
-rw-r--r--include/linux/platform_data/pinctrl-single.h12
-rw-r--r--include/linux/platform_data/usb-ehci-s5p.h21
-rw-r--r--include/linux/platform_data/usb-ohci-exynos.h21
-rw-r--r--include/linux/platform_data/usb-rcar-gen2-phy.h22
-rw-r--r--include/linux/platform_data/zforce_ts.h26
-rw-r--r--include/linux/platform_device.h1
-rw-r--r--include/linux/pm_opp.h139
-rw-r--r--include/linux/power/bq24735-charger.h (renamed from include/linux/irqchip/bcm2835.h)26
-rw-r--r--include/linux/powercap.h325
-rw-r--r--include/linux/preempt.h112
-rw-r--r--include/linux/preempt_mask.h41
-rw-r--r--include/linux/printk.h16
-rw-r--r--include/linux/pstore.h3
-rw-r--r--include/linux/pwm_backlight.h5
-rw-r--r--include/linux/random.h14
-rw-r--r--include/linux/rbtree.h16
-rw-r--r--include/linux/rculist.h23
-rw-r--r--include/linux/rcupdate.h24
-rw-r--r--include/linux/rcutiny.h17
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/regmap.h53
-rw-r--r--include/linux/regulator/consumer.h79
-rw-r--r--include/linux/regulator/driver.h18
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/rtnetlink.h7
-rw-r--r--include/linux/sched.h189
-rw-r--r--include/linux/sched/sysctl.h3
-rw-r--r--include/linux/sched_clock.h4
-rw-r--r--include/linux/security.h26
-rw-r--r--include/linux/seq_file.h15
-rw-r--r--include/linux/seqlock.h125
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serial_sci.h2
-rw-r--r--include/linux/sfi.h3
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/skbuff.h329
-rw-r--r--include/linux/slab.h111
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/slub_def.h2
-rw-r--r--include/linux/smp.h16
-rw-r--r--include/linux/spi/rspi.h2
-rw-r--r--include/linux/spi/spi.h61
-rw-r--r--include/linux/srcu.h14
-rw-r--r--include/linux/ssb/ssb_driver_gige.h14
-rw-r--r--include/linux/stop_machine.h1
-rw-r--r--include/linux/sunrpc/clnt.h6
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/swapops.h7
-rw-r--r--include/linux/syscalls.h8
-rw-r--r--include/linux/sysfs.h88
-rw-r--r--include/linux/sysrq.h3
-rw-r--r--include/linux/tegra-powergate.h36
-rw-r--r--include/linux/thinkpad_acpi.h15
-rw-r--r--include/linux/thread_info.h17
-rw-r--r--include/linux/topology.h6
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/tty.h29
-rw-r--r--include/linux/u64_stats_sync.h7
-rw-r--r--include/linux/uaccess.h8
-rw-r--r--include/linux/uprobes.h15
-rw-r--r--include/linux/usb.h12
-rw-r--r--include/linux/usb/cdc_ncm.h33
-rw-r--r--include/linux/usb/hcd.h16
-rw-r--r--include/linux/usb/intel_mid_otg.h180
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/omap_control_usb.h33
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/usb_phy_gen_xceiv.h3
-rw-r--r--include/linux/usb/wusb-wa.h51
-rw-r--r--include/linux/usb/wusb.h2
-rw-r--r--include/linux/user_namespace.h6
-rw-r--r--include/linux/virtio.h6
-rw-r--r--include/linux/virtio_config.h161
-rw-r--r--include/linux/virtio_ring.h2
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/wait.h393
-rw-r--r--include/linux/writeback.h2
304 files changed, 7831 insertions, 3059 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a5db4aeefa36..d9099b15b472 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -44,6 +44,20 @@
44#include <acpi/acpi_numa.h> 44#include <acpi/acpi_numa.h>
45#include <asm/acpi.h> 45#include <asm/acpi.h>
46 46
47static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
48{
49 return adev ? adev->handle : NULL;
50}
51
52#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
53#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
54#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
55
56static inline const char *acpi_dev_name(struct acpi_device *adev)
57{
58 return dev_name(&adev->dev);
59}
60
47enum acpi_irq_model_id { 61enum acpi_irq_model_id {
48 ACPI_IRQ_MODEL_PIC = 0, 62 ACPI_IRQ_MODEL_PIC = 0,
49 ACPI_IRQ_MODEL_IOAPIC, 63 ACPI_IRQ_MODEL_IOAPIC,
@@ -116,7 +130,7 @@ void acpi_numa_arch_fixup(void);
116 130
117#ifdef CONFIG_ACPI_HOTPLUG_CPU 131#ifdef CONFIG_ACPI_HOTPLUG_CPU
118/* Arch dependent functions for cpu hotplug support */ 132/* Arch dependent functions for cpu hotplug support */
119int acpi_map_lsapic(acpi_handle handle, int *pcpu); 133int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
120int acpi_unmap_lsapic(int cpu); 134int acpi_unmap_lsapic(int cpu);
121#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 135#endif /* CONFIG_ACPI_HOTPLUG_CPU */
122 136
@@ -294,58 +308,52 @@ void __init acpi_nvs_nosave_s3(void);
294#endif /* CONFIG_PM_SLEEP */ 308#endif /* CONFIG_PM_SLEEP */
295 309
296struct acpi_osc_context { 310struct acpi_osc_context {
297 char *uuid_str; /* uuid string */ 311 char *uuid_str; /* UUID string */
298 int rev; 312 int rev;
299 struct acpi_buffer cap; /* arg2/arg3 */ 313 struct acpi_buffer cap; /* list of DWORD capabilities */
300 struct acpi_buffer ret; /* free by caller if success */ 314 struct acpi_buffer ret; /* free by caller if success */
301}; 315};
302 316
303#define OSC_QUERY_TYPE 0 317acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
304#define OSC_SUPPORT_TYPE 1
305#define OSC_CONTROL_TYPE 2
306
307/* _OSC DW0 Definition */
308#define OSC_QUERY_ENABLE 1
309#define OSC_REQUEST_ERROR 2
310#define OSC_INVALID_UUID_ERROR 4
311#define OSC_INVALID_REVISION_ERROR 8
312#define OSC_CAPABILITIES_MASK_ERROR 16
313
314acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); 318acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
315 319
316/* platform-wide _OSC bits */ 320/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
317#define OSC_SB_PAD_SUPPORT 1 321#define OSC_QUERY_DWORD 0 /* DWORD 1 */
318#define OSC_SB_PPC_OST_SUPPORT 2 322#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
319#define OSC_SB_PR3_SUPPORT 4 323#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
320#define OSC_SB_HOTPLUG_OST_SUPPORT 8 324
321#define OSC_SB_APEI_SUPPORT 16 325/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
326#define OSC_QUERY_ENABLE 0x00000001 /* input */
327#define OSC_REQUEST_ERROR 0x00000002 /* return */
328#define OSC_INVALID_UUID_ERROR 0x00000004 /* return */
329#define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */
330#define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */
331
332/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */
333#define OSC_SB_PAD_SUPPORT 0x00000001
334#define OSC_SB_PPC_OST_SUPPORT 0x00000002
335#define OSC_SB_PR3_SUPPORT 0x00000004
336#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008
337#define OSC_SB_APEI_SUPPORT 0x00000010
338#define OSC_SB_CPC_SUPPORT 0x00000020
322 339
323extern bool osc_sb_apei_support_acked; 340extern bool osc_sb_apei_support_acked;
324 341
325/* PCI defined _OSC bits */ 342/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
326/* _OSC DW1 Definition (OS Support Fields) */ 343#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
327#define OSC_EXT_PCI_CONFIG_SUPPORT 1 344#define OSC_PCI_ASPM_SUPPORT 0x00000002
328#define OSC_ACTIVE_STATE_PWR_SUPPORT 2 345#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
329#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4 346#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
330#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8 347#define OSC_PCI_MSI_SUPPORT 0x00000010
331#define OSC_MSI_SUPPORT 16 348#define OSC_PCI_SUPPORT_MASKS 0x0000001f
332#define OSC_PCI_SUPPORT_MASKS 0x1f 349
333 350/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
334/* _OSC DW1 Definition (OS Control Fields) */ 351#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
335#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1 352#define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002
336#define OSC_SHPC_NATIVE_HP_CONTROL 2 353#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
337#define OSC_PCI_EXPRESS_PME_CONTROL 4 354#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
338#define OSC_PCI_EXPRESS_AER_CONTROL 8 355#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
339#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16 356#define OSC_PCI_CONTROL_MASKS 0x0000001f
340
341#define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
342 OSC_SHPC_NATIVE_HP_CONTROL | \
343 OSC_PCI_EXPRESS_PME_CONTROL | \
344 OSC_PCI_EXPRESS_AER_CONTROL | \
345 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
346
347#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
348 OSC_SHPC_NATIVE_HP_CONTROL)
349 357
350extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, 358extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
351 u32 *mask, u32 req); 359 u32 *mask, u32 req);
@@ -407,6 +415,15 @@ static inline bool acpi_driver_match_device(struct device *dev,
407 415
408#define acpi_disabled 1 416#define acpi_disabled 1
409 417
418#define ACPI_COMPANION(dev) (NULL)
419#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
420#define ACPI_HANDLE(dev) (NULL)
421
422static inline const char *acpi_dev_name(struct acpi_device *adev)
423{
424 return NULL;
425}
426
410static inline void acpi_early_init(void) { } 427static inline void acpi_early_init(void) { }
411 428
412static inline int early_acpi_boot_init(void) 429static inline int early_acpi_boot_init(void)
diff --git a/include/linux/acpi_gpio.h b/include/linux/acpi_gpio.h
index 4c120a1e0ca3..d875bc3dba3c 100644
--- a/include/linux/acpi_gpio.h
+++ b/include/linux/acpi_gpio.h
@@ -2,36 +2,35 @@
2#define _LINUX_ACPI_GPIO_H_ 2#define _LINUX_ACPI_GPIO_H_
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/err.h>
5#include <linux/errno.h> 6#include <linux/errno.h>
6#include <linux/gpio.h> 7#include <linux/gpio.h>
8#include <linux/gpio/consumer.h>
7 9
8/** 10/**
9 * struct acpi_gpio_info - ACPI GPIO specific information 11 * struct acpi_gpio_info - ACPI GPIO specific information
10 * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo 12 * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
13 * @active_low: in case of @gpioint, the pin is active low
11 */ 14 */
12struct acpi_gpio_info { 15struct acpi_gpio_info {
13 bool gpioint; 16 bool gpioint;
17 bool active_low;
14}; 18};
15 19
16#ifdef CONFIG_GPIO_ACPI 20#ifdef CONFIG_GPIO_ACPI
17 21
18int acpi_get_gpio(char *path, int pin); 22struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
19int acpi_get_gpio_by_index(struct device *dev, int index, 23 struct acpi_gpio_info *info);
20 struct acpi_gpio_info *info);
21void acpi_gpiochip_request_interrupts(struct gpio_chip *chip); 24void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
22void acpi_gpiochip_free_interrupts(struct gpio_chip *chip); 25void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
23 26
24#else /* CONFIG_GPIO_ACPI */ 27#else /* CONFIG_GPIO_ACPI */
25 28
26static inline int acpi_get_gpio(char *path, int pin) 29static inline struct gpio_desc *
30acpi_get_gpiod_by_index(struct device *dev, int index,
31 struct acpi_gpio_info *info)
27{ 32{
28 return -ENODEV; 33 return ERR_PTR(-ENOSYS);
29}
30
31static inline int acpi_get_gpio_by_index(struct device *dev, int index,
32 struct acpi_gpio_info *info)
33{
34 return -ENODEV;
35} 34}
36 35
37static inline void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { } 36static inline void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
@@ -39,4 +38,14 @@ static inline void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
39 38
40#endif /* CONFIG_GPIO_ACPI */ 39#endif /* CONFIG_GPIO_ACPI */
41 40
41static inline int acpi_get_gpio_by_index(struct device *dev, int index,
42 struct acpi_gpio_info *info)
43{
44 struct gpio_desc *desc = acpi_get_gpiod_by_index(dev, index, info);
45
46 if (IS_ERR(desc))
47 return PTR_ERR(desc);
48 return desc_to_gpio(desc);
49}
50
42#endif /* _LINUX_ACPI_GPIO_H_ */ 51#endif /* _LINUX_ACPI_GPIO_H_ */
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 43ec7e247a80..63b5eff0a80f 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -21,7 +21,7 @@
21#include <linux/resource.h> 21#include <linux/resource.h>
22#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
23 23
24#define AMBA_NR_IRQS 2 24#define AMBA_NR_IRQS 9
25#define AMBA_CID 0xb105f00d 25#define AMBA_CID 0xb105f00d
26 26
27struct clk; 27struct clk;
@@ -30,7 +30,6 @@ struct amba_device {
30 struct device dev; 30 struct device dev;
31 struct resource res; 31 struct resource res;
32 struct clk *pclk; 32 struct clk *pclk;
33 u64 dma_mask;
34 unsigned int periphid; 33 unsigned int periphid;
35 unsigned int irq[AMBA_NR_IRQS]; 34 unsigned int irq[AMBA_NR_IRQS];
36}; 35};
@@ -131,7 +130,6 @@ struct amba_device name##_device = { \
131struct amba_device name##_device = { \ 130struct amba_device name##_device = { \
132 .dev = __AMBA_DEV(busid, data, ~0ULL), \ 131 .dev = __AMBA_DEV(busid, data, ~0ULL), \
133 .res = DEFINE_RES_MEM(base, SZ_4K), \ 132 .res = DEFINE_RES_MEM(base, SZ_4K), \
134 .dma_mask = ~0ULL, \
135 .irq = irqs, \ 133 .irq = irqs, \
136 .periphid = id, \ 134 .periphid = id, \
137} 135}
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 62d9303c2837..0ddb5c02ad8b 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -40,7 +40,7 @@
40#define UART010_LCRL 0x10 /* Line control register, low byte. */ 40#define UART010_LCRL 0x10 /* Line control register, low byte. */
41#define UART010_CR 0x14 /* Control register. */ 41#define UART010_CR 0x14 /* Control register. */
42#define UART01x_FR 0x18 /* Flag register (Read only). */ 42#define UART01x_FR 0x18 /* Flag register (Read only). */
43#define UART010_IIR 0x1C /* Interrupt indentification register (Read). */ 43#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
44#define UART010_ICR 0x1C /* Interrupt clear register (Write). */ 44#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
45#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */ 45#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */
46#define UART01x_ILPR 0x20 /* IrDA low power counter register. */ 46#define UART01x_ILPR 0x20 /* IrDA low power counter register. */
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index cf573c22b81e..8013a45242fe 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -13,9 +13,6 @@ struct file_operations;
13struct file *anon_inode_getfile(const char *name, 13struct file *anon_inode_getfile(const char *name,
14 const struct file_operations *fops, 14 const struct file_operations *fops,
15 void *priv, int flags); 15 void *priv, int flags);
16struct file *anon_inode_getfile_private(const char *name,
17 const struct file_operations *fops,
18 void *priv, int flags);
19int anon_inode_getfd(const char *name, const struct file_operations *fops, 16int anon_inode_getfd(const char *name, const struct file_operations *fops,
20 void *priv, int flags); 17 void *priv, int flags);
21 18
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
new file mode 100644
index 000000000000..a89df3be1686
--- /dev/null
+++ b/include/linux/assoc_array.h
@@ -0,0 +1,92 @@
1/* Generic associative array implementation.
2 *
3 * See Documentation/assoc_array.txt for information.
4 *
5 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
6 * Written by David Howells (dhowells@redhat.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public Licence
10 * as published by the Free Software Foundation; either version
11 * 2 of the Licence, or (at your option) any later version.
12 */
13
14#ifndef _LINUX_ASSOC_ARRAY_H
15#define _LINUX_ASSOC_ARRAY_H
16
17#ifdef CONFIG_ASSOCIATIVE_ARRAY
18
19#include <linux/types.h>
20
21#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */
22
23/*
24 * Generic associative array.
25 */
26struct assoc_array {
27 struct assoc_array_ptr *root; /* The node at the root of the tree */
28 unsigned long nr_leaves_on_tree;
29};
30
31/*
32 * Operations on objects and index keys for use by array manipulation routines.
33 */
34struct assoc_array_ops {
35 /* Method to get a chunk of an index key from caller-supplied data */
36 unsigned long (*get_key_chunk)(const void *index_key, int level);
37
38 /* Method to get a piece of an object's index key */
39 unsigned long (*get_object_key_chunk)(const void *object, int level);
40
41 /* Is this the object we're looking for? */
42 bool (*compare_object)(const void *object, const void *index_key);
43
44 /* How different is an object from an index key, to a bit position in
45 * their keys? (or -1 if they're the same)
46 */
47 int (*diff_objects)(const void *object, const void *index_key);
48
49 /* Method to free an object. */
50 void (*free_object)(void *object);
51};
52
53/*
54 * Access and manipulation functions.
55 */
56struct assoc_array_edit;
57
58static inline void assoc_array_init(struct assoc_array *array)
59{
60 array->root = NULL;
61 array->nr_leaves_on_tree = 0;
62}
63
64extern int assoc_array_iterate(const struct assoc_array *array,
65 int (*iterator)(const void *object,
66 void *iterator_data),
67 void *iterator_data);
68extern void *assoc_array_find(const struct assoc_array *array,
69 const struct assoc_array_ops *ops,
70 const void *index_key);
71extern void assoc_array_destroy(struct assoc_array *array,
72 const struct assoc_array_ops *ops);
73extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
74 const struct assoc_array_ops *ops,
75 const void *index_key,
76 void *object);
77extern void assoc_array_insert_set_object(struct assoc_array_edit *edit,
78 void *object);
79extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
80 const struct assoc_array_ops *ops,
81 const void *index_key);
82extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
83 const struct assoc_array_ops *ops);
84extern void assoc_array_apply_edit(struct assoc_array_edit *edit);
85extern void assoc_array_cancel_edit(struct assoc_array_edit *edit);
86extern int assoc_array_gc(struct assoc_array *array,
87 const struct assoc_array_ops *ops,
88 bool (*iterator)(void *object, void *iterator_data),
89 void *iterator_data);
90
91#endif /* CONFIG_ASSOCIATIVE_ARRAY */
92#endif /* _LINUX_ASSOC_ARRAY_H */
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
new file mode 100644
index 000000000000..711275e6681c
--- /dev/null
+++ b/include/linux/assoc_array_priv.h
@@ -0,0 +1,182 @@
1/* Private definitions for the generic associative array implementation.
2 *
3 * See Documentation/assoc_array.txt for information.
4 *
5 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
6 * Written by David Howells (dhowells@redhat.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public Licence
10 * as published by the Free Software Foundation; either version
11 * 2 of the Licence, or (at your option) any later version.
12 */
13
14#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
15#define _LINUX_ASSOC_ARRAY_PRIV_H
16
17#ifdef CONFIG_ASSOCIATIVE_ARRAY
18
19#include <linux/assoc_array.h>
20
21#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */
22#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1)
23#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT))
24#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1)
25#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1)
26#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG))
27
28/*
29 * Undefined type representing a pointer with type information in the bottom
30 * two bits.
31 */
32struct assoc_array_ptr;
33
34/*
35 * An N-way node in the tree.
36 *
37 * Each slot contains one of four things:
38 *
39 * (1) Nothing (NULL).
40 *
41 * (2) A leaf object (pointer types 0).
42 *
43 * (3) A next-level node (pointer type 1, subtype 0).
44 *
45 * (4) A shortcut (pointer type 1, subtype 1).
46 *
47 * The tree is optimised for search-by-ID, but permits reasonable iteration
48 * also.
49 *
50 * The tree is navigated by constructing an index key consisting of an array of
51 * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size.
52 *
53 * The segments correspond to levels of the tree (the first segment is used at
54 * level 0, the second at level 1, etc.).
55 */
56struct assoc_array_node {
57 struct assoc_array_ptr *back_pointer;
58 u8 parent_slot;
59 struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT];
60 unsigned long nr_leaves_on_branch;
61};
62
63/*
64 * A shortcut through the index space out to where a collection of nodes/leaves
65 * with the same IDs live.
66 */
67struct assoc_array_shortcut {
68 struct assoc_array_ptr *back_pointer;
69 int parent_slot;
70 int skip_to_level;
71 struct assoc_array_ptr *next_node;
72 unsigned long index_key[];
73};
74
75/*
76 * Preallocation cache.
77 */
78struct assoc_array_edit {
79 struct rcu_head rcu;
80 struct assoc_array *array;
81 const struct assoc_array_ops *ops;
82 const struct assoc_array_ops *ops_for_excised_subtree;
83 struct assoc_array_ptr *leaf;
84 struct assoc_array_ptr **leaf_p;
85 struct assoc_array_ptr *dead_leaf;
86 struct assoc_array_ptr *new_meta[3];
87 struct assoc_array_ptr *excised_meta[1];
88 struct assoc_array_ptr *excised_subtree;
89 struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT];
90 struct assoc_array_ptr *set_backpointers_to;
91 struct assoc_array_node *adjust_count_on;
92 long adjust_count_by;
93 struct {
94 struct assoc_array_ptr **ptr;
95 struct assoc_array_ptr *to;
96 } set[2];
97 struct {
98 u8 *p;
99 u8 to;
100 } set_parent_slot[1];
101 u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1];
102};
103
104/*
105 * Internal tree member pointers are marked in the bottom one or two bits to
106 * indicate what type they are so that we don't have to look behind every
107 * pointer to see what it points to.
108 *
109 * We provide functions to test type annotations and to create and translate
110 * the annotated pointers.
111 */
112#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL
113#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */
114#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */
115#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL
116#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL
117#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL
118
119static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x)
120{
121 return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK;
122}
123static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x)
124{
125 return !assoc_array_ptr_is_meta(x);
126}
127static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x)
128{
129 return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK;
130}
131static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x)
132{
133 return !assoc_array_ptr_is_shortcut(x);
134}
135
136static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x)
137{
138 return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK);
139}
140
141static inline
142unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x)
143{
144 return (unsigned long)x &
145 ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK);
146}
147static inline
148struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x)
149{
150 return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x);
151}
152static inline
153struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x)
154{
155 return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x);
156}
157
158static inline
159struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
160{
161 return (struct assoc_array_ptr *)((unsigned long)p | t);
162}
163static inline
164struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
165{
166 return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
167}
168static inline
169struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
170{
171 return __assoc_array_x_to_ptr(
172 p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
173}
174static inline
175struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
176{
177 return __assoc_array_x_to_ptr(
178 p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
179}
180
181#endif /* CONFIG_ASSOCIATIVE_ARRAY */
182#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index bf4c69ca76df..f2f4d8da97c0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -219,6 +219,7 @@ enum {
219 ATA_CMD_IDLE = 0xE3, /* place in idle power mode */ 219 ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
220 ATA_CMD_EDD = 0x90, /* execute device diagnostic */ 220 ATA_CMD_EDD = 0x90, /* execute device diagnostic */
221 ATA_CMD_DOWNLOAD_MICRO = 0x92, 221 ATA_CMD_DOWNLOAD_MICRO = 0x92,
222 ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93,
222 ATA_CMD_NOP = 0x00, 223 ATA_CMD_NOP = 0x00,
223 ATA_CMD_FLUSH = 0xE7, 224 ATA_CMD_FLUSH = 0xE7,
224 ATA_CMD_FLUSH_EXT = 0xEA, 225 ATA_CMD_FLUSH_EXT = 0xEA,
@@ -268,12 +269,15 @@ enum {
268 ATA_CMD_WRITE_LOG_EXT = 0x3F, 269 ATA_CMD_WRITE_LOG_EXT = 0x3F,
269 ATA_CMD_READ_LOG_DMA_EXT = 0x47, 270 ATA_CMD_READ_LOG_DMA_EXT = 0x47,
270 ATA_CMD_WRITE_LOG_DMA_EXT = 0x57, 271 ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
272 ATA_CMD_TRUSTED_NONDATA = 0x5B,
271 ATA_CMD_TRUSTED_RCV = 0x5C, 273 ATA_CMD_TRUSTED_RCV = 0x5C,
272 ATA_CMD_TRUSTED_RCV_DMA = 0x5D, 274 ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
273 ATA_CMD_TRUSTED_SND = 0x5E, 275 ATA_CMD_TRUSTED_SND = 0x5E,
274 ATA_CMD_TRUSTED_SND_DMA = 0x5F, 276 ATA_CMD_TRUSTED_SND_DMA = 0x5F,
275 ATA_CMD_PMP_READ = 0xE4, 277 ATA_CMD_PMP_READ = 0xE4,
278 ATA_CMD_PMP_READ_DMA = 0xE9,
276 ATA_CMD_PMP_WRITE = 0xE8, 279 ATA_CMD_PMP_WRITE = 0xE8,
280 ATA_CMD_PMP_WRITE_DMA = 0xEB,
277 ATA_CMD_CONF_OVERLAY = 0xB1, 281 ATA_CMD_CONF_OVERLAY = 0xB1,
278 ATA_CMD_SEC_SET_PASS = 0xF1, 282 ATA_CMD_SEC_SET_PASS = 0xF1,
279 ATA_CMD_SEC_UNLOCK = 0xF2, 283 ATA_CMD_SEC_UNLOCK = 0xF2,
@@ -292,6 +296,9 @@ enum {
292 ATA_CMD_CFA_TRANS_SECT = 0x87, 296 ATA_CMD_CFA_TRANS_SECT = 0x87,
293 ATA_CMD_CFA_ERASE = 0xC0, 297 ATA_CMD_CFA_ERASE = 0xC0,
294 ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, 298 ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
299 ATA_CMD_REQ_SENSE_DATA = 0x0B,
300 ATA_CMD_SANITIZE_DEVICE = 0xB4,
301
295 /* marked obsolete in the ATA/ATAPI-7 spec */ 302 /* marked obsolete in the ATA/ATAPI-7 spec */
296 ATA_CMD_RESTORE = 0x10, 303 ATA_CMD_RESTORE = 0x10,
297 304
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index be201ca2990c..00beddf6be20 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -125,5 +125,6 @@
125#define ATMEL_US_IF 0x4c /* IrDA Filter Register */ 125#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
126 126
127#define ATMEL_US_NAME 0xf0 /* Ip Name */ 127#define ATMEL_US_NAME 0xf0 /* Ip Name */
128#define ATMEL_US_VERSION 0xfc /* Ip Version */
128 129
129#endif 130#endif
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5c745a..3e0fbe441763 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
3 3
4#include <uapi/linux/auxvec.h> 4#include <uapi/linux/auxvec.h>
5 5
6#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ 6#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
7 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ 7 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
8#endif /* _LINUX_AUXVEC_H */ 8#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5f66d519a726..24819001f5c8 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -109,7 +109,7 @@ struct backing_dev_info {
109#endif 109#endif
110}; 110};
111 111
112int bdi_init(struct backing_dev_info *bdi); 112int __must_check bdi_init(struct backing_dev_info *bdi);
113void bdi_destroy(struct backing_dev_info *bdi); 113void bdi_destroy(struct backing_dev_info *bdi);
114 114
115__printf(3, 4) 115__printf(3, 4)
@@ -117,7 +117,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
117 const char *fmt, ...); 117 const char *fmt, ...);
118int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 118int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
119void bdi_unregister(struct backing_dev_info *bdi); 119void bdi_unregister(struct backing_dev_info *bdi);
120int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); 120int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
121void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, 121void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
122 enum wb_reason reason); 122 enum wb_reason reason);
123void bdi_start_background_writeback(struct backing_dev_info *bdi); 123void bdi_start_background_writeback(struct backing_dev_info *bdi);
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 53b77949c79d..5f9cd963213d 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -100,6 +100,9 @@ struct backlight_device {
100 /* The framebuffer notifier block */ 100 /* The framebuffer notifier block */
101 struct notifier_block fb_notif; 101 struct notifier_block fb_notif;
102 102
103 /* list entry of all registered backlight devices */
104 struct list_head entry;
105
103 struct device dev; 106 struct device dev;
104}; 107};
105 108
@@ -123,6 +126,7 @@ extern void devm_backlight_device_unregister(struct device *dev,
123 struct backlight_device *bd); 126 struct backlight_device *bd);
124extern void backlight_force_update(struct backlight_device *bd, 127extern void backlight_force_update(struct backlight_device *bd,
125 enum backlight_update_reason reason); 128 enum backlight_update_reason reason);
129extern bool backlight_device_registered(enum backlight_type type);
126 130
127#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) 131#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
128 132
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index e8112ae50531..fd8bf3219ef7 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -56,11 +56,12 @@ struct linux_binprm {
56 56
57/* Function parameter for binfmt->coredump */ 57/* Function parameter for binfmt->coredump */
58struct coredump_params { 58struct coredump_params {
59 siginfo_t *siginfo; 59 const siginfo_t *siginfo;
60 struct pt_regs *regs; 60 struct pt_regs *regs;
61 struct file *file; 61 struct file *file;
62 unsigned long limit; 62 unsigned long limit;
63 unsigned long mm_flags; 63 unsigned long mm_flags;
64 loff_t written;
64}; 65};
65 66
66/* 67/*
@@ -99,9 +100,6 @@ extern void setup_new_exec(struct linux_binprm * bprm);
99extern void would_dump(struct linux_binprm *, struct file *); 100extern void would_dump(struct linux_binprm *, struct file *);
100 101
101extern int suid_dumpable; 102extern int suid_dumpable;
102#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
103#define SUID_DUMP_USER 1 /* Dump as user of process */
104#define SUID_DUMP_ROOT 2 /* Dump as root */
105 103
106/* Stack area protections */ 104/* Stack area protections */
107#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */ 105#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ec48bac5b039..060ff695085c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,6 +218,7 @@ struct bio_pair {
218}; 218};
219extern struct bio_pair *bio_split(struct bio *bi, int first_sectors); 219extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
220extern void bio_pair_release(struct bio_pair *dbio); 220extern void bio_pair_release(struct bio_pair *dbio);
221extern void bio_trim(struct bio *bio, int offset, int size);
221 222
222extern struct bio_set *bioset_create(unsigned int, unsigned int); 223extern struct bio_set *bioset_create(unsigned int, unsigned int);
223extern void bioset_free(struct bio_set *); 224extern void bioset_free(struct bio_set *);
@@ -419,6 +420,8 @@ static inline void bio_list_init(struct bio_list *bl)
419 bl->head = bl->tail = NULL; 420 bl->head = bl->tail = NULL;
420} 421}
421 422
423#define BIO_EMPTY_LIST { NULL, NULL }
424
422#define bio_list_for_each(bio, bl) \ 425#define bio_list_for_each(bio, bl) \
423 for (bio = (bl)->head; bio; bio = bio->bi_next) 426 for (bio = (bl)->head; bio; bio = bio->bi_next)
424 427
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3b6b82108b9..abc9ca778456 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,12 +4,23 @@
4 4
5#ifdef __KERNEL__ 5#ifdef __KERNEL__
6#define BIT(nr) (1UL << (nr)) 6#define BIT(nr) (1UL << (nr))
7#define BIT_ULL(nr) (1ULL << (nr))
7#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) 8#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
8#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) 9#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
10#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
11#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
9#define BITS_PER_BYTE 8 12#define BITS_PER_BYTE 8
10#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) 13#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
11#endif 14#endif
12 15
16/*
17 * Create a contiguous bitmask starting at bit position @l and ending at
18 * position @h. For example
19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
20 */
21#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
22#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
23
13extern unsigned int __sw_hweight8(unsigned int w); 24extern unsigned int __sw_hweight8(unsigned int w);
14extern unsigned int __sw_hweight16(unsigned int w); 25extern unsigned int __sw_hweight16(unsigned int w);
15extern unsigned int __sw_hweight32(unsigned int w); 26extern unsigned int __sw_hweight32(unsigned int w);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
new file mode 100644
index 000000000000..ab0e9b2025b3
--- /dev/null
+++ b/include/linux/blk-mq.h
@@ -0,0 +1,183 @@
1#ifndef BLK_MQ_H
2#define BLK_MQ_H
3
4#include <linux/blkdev.h>
5
6struct blk_mq_tags;
7
8struct blk_mq_cpu_notifier {
9 struct list_head list;
10 void *data;
11 void (*notify)(void *data, unsigned long action, unsigned int cpu);
12};
13
14struct blk_mq_hw_ctx {
15 struct {
16 spinlock_t lock;
17 struct list_head dispatch;
18 } ____cacheline_aligned_in_smp;
19
20 unsigned long state; /* BLK_MQ_S_* flags */
21 struct delayed_work delayed_work;
22
23 unsigned long flags; /* BLK_MQ_F_* flags */
24
25 struct request_queue *queue;
26 unsigned int queue_num;
27
28 void *driver_data;
29
30 unsigned int nr_ctx;
31 struct blk_mq_ctx **ctxs;
32 unsigned int nr_ctx_map;
33 unsigned long *ctx_map;
34
35 struct request **rqs;
36 struct list_head page_list;
37 struct blk_mq_tags *tags;
38
39 unsigned long queued;
40 unsigned long run;
41#define BLK_MQ_MAX_DISPATCH_ORDER 10
42 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
43
44 unsigned int queue_depth;
45 unsigned int numa_node;
46 unsigned int cmd_size; /* per-request extra data */
47
48 struct blk_mq_cpu_notifier cpu_notifier;
49 struct kobject kobj;
50};
51
52struct blk_mq_reg {
53 struct blk_mq_ops *ops;
54 unsigned int nr_hw_queues;
55 unsigned int queue_depth;
56 unsigned int reserved_tags;
57 unsigned int cmd_size; /* per-request extra data */
58 int numa_node;
59 unsigned int timeout;
60 unsigned int flags; /* BLK_MQ_F_* */
61};
62
63typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
64typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
65typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int);
66typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
67typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
68typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
69
70struct blk_mq_ops {
71 /*
72 * Queue request
73 */
74 queue_rq_fn *queue_rq;
75
76 /*
77 * Map to specific hardware queue
78 */
79 map_queue_fn *map_queue;
80
81 /*
82 * Called on request timeout
83 */
84 rq_timed_out_fn *timeout;
85
86 /*
87 * Override for hctx allocations (should probably go)
88 */
89 alloc_hctx_fn *alloc_hctx;
90 free_hctx_fn *free_hctx;
91
92 /*
93 * Called when the block layer side of a hardware queue has been
94 * set up, allowing the driver to allocate/init matching structures.
95 * Ditto for exit/teardown.
96 */
97 init_hctx_fn *init_hctx;
98 exit_hctx_fn *exit_hctx;
99};
100
101enum {
102 BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */
103 BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */
104 BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
105
106 BLK_MQ_F_SHOULD_MERGE = 1 << 0,
107 BLK_MQ_F_SHOULD_SORT = 1 << 1,
108 BLK_MQ_F_SHOULD_IPI = 1 << 2,
109
110 BLK_MQ_S_STOPPED = 1 << 0,
111
112 BLK_MQ_MAX_DEPTH = 2048,
113};
114
115struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
116void blk_mq_free_queue(struct request_queue *);
117int blk_mq_register_disk(struct gendisk *);
118void blk_mq_unregister_disk(struct gendisk *);
119void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
120
121void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
122
123void blk_mq_insert_request(struct request_queue *, struct request *, bool);
124void blk_mq_run_queues(struct request_queue *q, bool async);
125void blk_mq_free_request(struct request *rq);
126bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
127struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
128struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
129struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
130
131struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
132struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
133void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
134
135void blk_mq_end_io(struct request *rq, int error);
136
137void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
138void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
139void blk_mq_stop_hw_queues(struct request_queue *q);
140void blk_mq_start_stopped_hw_queues(struct request_queue *q);
141
142/*
143 * Driver command data is immediately after the request. So subtract request
144 * size to get back to the original request.
145 */
146static inline struct request *blk_mq_rq_from_pdu(void *pdu)
147{
148 return pdu - sizeof(struct request);
149}
150static inline void *blk_mq_rq_to_pdu(struct request *rq)
151{
152 return (void *) rq + sizeof(*rq);
153}
154
155static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
156 unsigned int tag)
157{
158 return hctx->rqs[tag];
159}
160
161#define queue_for_each_hw_ctx(q, hctx, i) \
162 for ((i) = 0, hctx = (q)->queue_hw_ctx[0]; \
163 (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
164
165#define queue_for_each_ctx(q, ctx, i) \
166 for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0); \
167 (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
168
169#define hctx_for_each_ctx(hctx, ctx, i) \
170 for ((i) = 0, ctx = (hctx)->ctxs[0]; \
171 (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
172
173#define blk_ctx_sum(q, sum) \
174({ \
175 struct blk_mq_ctx *__x; \
176 unsigned int __ret = 0, __i; \
177 \
178 queue_for_each_ctx((q), __x, __i) \
179 __ret += sum; \
180 __ret; \
181})
182
183#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index fa1abeb45b76..238ef0ed62f8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -178,19 +178,20 @@ enum rq_flag_bits {
178 __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 178 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
179 __REQ_KERNEL, /* direct IO to kernel pages */ 179 __REQ_KERNEL, /* direct IO to kernel pages */
180 __REQ_PM, /* runtime pm request */ 180 __REQ_PM, /* runtime pm request */
181 __REQ_END, /* last of chain of requests */
181 __REQ_NR_BITS, /* stops here */ 182 __REQ_NR_BITS, /* stops here */
182}; 183};
183 184
184#define REQ_WRITE (1 << __REQ_WRITE) 185#define REQ_WRITE (1ULL << __REQ_WRITE)
185#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) 186#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
186#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) 187#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
187#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) 188#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
188#define REQ_SYNC (1 << __REQ_SYNC) 189#define REQ_SYNC (1ULL << __REQ_SYNC)
189#define REQ_META (1 << __REQ_META) 190#define REQ_META (1ULL << __REQ_META)
190#define REQ_PRIO (1 << __REQ_PRIO) 191#define REQ_PRIO (1ULL << __REQ_PRIO)
191#define REQ_DISCARD (1 << __REQ_DISCARD) 192#define REQ_DISCARD (1ULL << __REQ_DISCARD)
192#define REQ_WRITE_SAME (1 << __REQ_WRITE_SAME) 193#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
193#define REQ_NOIDLE (1 << __REQ_NOIDLE) 194#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
194 195
195#define REQ_FAILFAST_MASK \ 196#define REQ_FAILFAST_MASK \
196 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 197 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -206,28 +207,29 @@ enum rq_flag_bits {
206#define REQ_NOMERGE_FLAGS \ 207#define REQ_NOMERGE_FLAGS \
207 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) 208 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
208 209
209#define REQ_RAHEAD (1 << __REQ_RAHEAD) 210#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
210#define REQ_THROTTLED (1 << __REQ_THROTTLED) 211#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
211 212
212#define REQ_SORTED (1 << __REQ_SORTED) 213#define REQ_SORTED (1ULL << __REQ_SORTED)
213#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 214#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
214#define REQ_FUA (1 << __REQ_FUA) 215#define REQ_FUA (1ULL << __REQ_FUA)
215#define REQ_NOMERGE (1 << __REQ_NOMERGE) 216#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
216#define REQ_STARTED (1 << __REQ_STARTED) 217#define REQ_STARTED (1ULL << __REQ_STARTED)
217#define REQ_DONTPREP (1 << __REQ_DONTPREP) 218#define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
218#define REQ_QUEUED (1 << __REQ_QUEUED) 219#define REQ_QUEUED (1ULL << __REQ_QUEUED)
219#define REQ_ELVPRIV (1 << __REQ_ELVPRIV) 220#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV)
220#define REQ_FAILED (1 << __REQ_FAILED) 221#define REQ_FAILED (1ULL << __REQ_FAILED)
221#define REQ_QUIET (1 << __REQ_QUIET) 222#define REQ_QUIET (1ULL << __REQ_QUIET)
222#define REQ_PREEMPT (1 << __REQ_PREEMPT) 223#define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
223#define REQ_ALLOCED (1 << __REQ_ALLOCED) 224#define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
224#define REQ_COPY_USER (1 << __REQ_COPY_USER) 225#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
225#define REQ_FLUSH (1 << __REQ_FLUSH) 226#define REQ_FLUSH (1ULL << __REQ_FLUSH)
226#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ) 227#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
227#define REQ_IO_STAT (1 << __REQ_IO_STAT) 228#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
228#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) 229#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
229#define REQ_SECURE (1 << __REQ_SECURE) 230#define REQ_SECURE (1ULL << __REQ_SECURE)
230#define REQ_KERNEL (1 << __REQ_KERNEL) 231#define REQ_KERNEL (1ULL << __REQ_KERNEL)
231#define REQ_PM (1 << __REQ_PM) 232#define REQ_PM (1ULL << __REQ_PM)
233#define REQ_END (1ULL << __REQ_END)
232 234
233#endif /* __LINUX_BLK_TYPES_H */ 235#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0e6f765aa1f5..1b135d49b279 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -8,6 +8,7 @@
8#include <linux/major.h> 8#include <linux/major.h>
9#include <linux/genhd.h> 9#include <linux/genhd.h>
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/llist.h>
11#include <linux/timer.h> 12#include <linux/timer.h>
12#include <linux/workqueue.h> 13#include <linux/workqueue.h>
13#include <linux/pagemap.h> 14#include <linux/pagemap.h>
@@ -94,12 +95,19 @@ enum rq_cmd_type_bits {
94 * as well! 95 * as well!
95 */ 96 */
96struct request { 97struct request {
97 struct list_head queuelist; 98 union {
98 struct call_single_data csd; 99 struct list_head queuelist;
100 struct llist_node ll_list;
101 };
102 union {
103 struct call_single_data csd;
104 struct work_struct mq_flush_data;
105 };
99 106
100 struct request_queue *q; 107 struct request_queue *q;
108 struct blk_mq_ctx *mq_ctx;
101 109
102 unsigned int cmd_flags; 110 u64 cmd_flags;
103 enum rq_cmd_type_bits cmd_type; 111 enum rq_cmd_type_bits cmd_type;
104 unsigned long atomic_flags; 112 unsigned long atomic_flags;
105 113
@@ -160,8 +168,6 @@ struct request {
160 168
161 unsigned short ioprio; 169 unsigned short ioprio;
162 170
163 int ref_count;
164
165 void *special; /* opaque pointer available for LLD use */ 171 void *special; /* opaque pointer available for LLD use */
166 char *buffer; /* kaddr of the current segment if available */ 172 char *buffer; /* kaddr of the current segment if available */
167 173
@@ -215,6 +221,8 @@ struct request_pm_state
215 221
216#include <linux/elevator.h> 222#include <linux/elevator.h>
217 223
224struct blk_queue_ctx;
225
218typedef void (request_fn_proc) (struct request_queue *q); 226typedef void (request_fn_proc) (struct request_queue *q);
219typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); 227typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
220typedef int (prep_rq_fn) (struct request_queue *, struct request *); 228typedef int (prep_rq_fn) (struct request_queue *, struct request *);
@@ -313,6 +321,18 @@ struct request_queue {
313 dma_drain_needed_fn *dma_drain_needed; 321 dma_drain_needed_fn *dma_drain_needed;
314 lld_busy_fn *lld_busy_fn; 322 lld_busy_fn *lld_busy_fn;
315 323
324 struct blk_mq_ops *mq_ops;
325
326 unsigned int *mq_map;
327
328 /* sw queues */
329 struct blk_mq_ctx *queue_ctx;
330 unsigned int nr_queues;
331
332 /* hw dispatch queues */
333 struct blk_mq_hw_ctx **queue_hw_ctx;
334 unsigned int nr_hw_queues;
335
316 /* 336 /*
317 * Dispatch queue sorting 337 * Dispatch queue sorting
318 */ 338 */
@@ -361,6 +381,11 @@ struct request_queue {
361 */ 381 */
362 struct kobject kobj; 382 struct kobject kobj;
363 383
384 /*
385 * mq queue kobject
386 */
387 struct kobject mq_kobj;
388
364#ifdef CONFIG_PM_RUNTIME 389#ifdef CONFIG_PM_RUNTIME
365 struct device *dev; 390 struct device *dev;
366 int rpm_status; 391 int rpm_status;
@@ -425,7 +450,13 @@ struct request_queue {
425 unsigned long flush_pending_since; 450 unsigned long flush_pending_since;
426 struct list_head flush_queue[2]; 451 struct list_head flush_queue[2];
427 struct list_head flush_data_in_flight; 452 struct list_head flush_data_in_flight;
428 struct request flush_rq; 453 union {
454 struct request flush_rq;
455 struct {
456 spinlock_t mq_flush_lock;
457 struct work_struct mq_flush_work;
458 };
459 };
429 460
430 struct mutex sysfs_lock; 461 struct mutex sysfs_lock;
431 462
@@ -437,14 +468,14 @@ struct request_queue {
437 struct bsg_class_device bsg_dev; 468 struct bsg_class_device bsg_dev;
438#endif 469#endif
439 470
440#ifdef CONFIG_BLK_CGROUP
441 struct list_head all_q_node;
442#endif
443#ifdef CONFIG_BLK_DEV_THROTTLING 471#ifdef CONFIG_BLK_DEV_THROTTLING
444 /* Throttle data */ 472 /* Throttle data */
445 struct throtl_data *td; 473 struct throtl_data *td;
446#endif 474#endif
447 struct rcu_head rcu_head; 475 struct rcu_head rcu_head;
476 wait_queue_head_t mq_freeze_wq;
477 struct percpu_counter mq_usage_counter;
478 struct list_head all_q_node;
448}; 479};
449 480
450#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 481#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -467,12 +498,16 @@ struct request_queue {
467#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ 498#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
468#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ 499#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
469#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 500#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
501#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
470 502
471#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 503#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
472 (1 << QUEUE_FLAG_STACKABLE) | \ 504 (1 << QUEUE_FLAG_STACKABLE) | \
473 (1 << QUEUE_FLAG_SAME_COMP) | \ 505 (1 << QUEUE_FLAG_SAME_COMP) | \
474 (1 << QUEUE_FLAG_ADD_RANDOM)) 506 (1 << QUEUE_FLAG_ADD_RANDOM))
475 507
508#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
509 (1 << QUEUE_FLAG_SAME_COMP))
510
476static inline void queue_lockdep_assert_held(struct request_queue *q) 511static inline void queue_lockdep_assert_held(struct request_queue *q)
477{ 512{
478 if (q->queue_lock) 513 if (q->queue_lock)
@@ -539,6 +574,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
539#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) 574#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
540#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) 575#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
541#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) 576#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
577#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
542#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 578#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
543#define blk_queue_noxmerges(q) \ 579#define blk_queue_noxmerges(q) \
544 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) 580 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
@@ -570,7 +606,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
570 606
571#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 607#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
572 608
573#define rq_data_dir(rq) ((rq)->cmd_flags & 1) 609#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
574 610
575static inline unsigned int blk_queue_cluster(struct request_queue *q) 611static inline unsigned int blk_queue_cluster(struct request_queue *q)
576{ 612{
@@ -1013,6 +1049,7 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1013struct blk_plug { 1049struct blk_plug {
1014 unsigned long magic; /* detect uninitialized use-cases */ 1050 unsigned long magic; /* detect uninitialized use-cases */
1015 struct list_head list; /* requests */ 1051 struct list_head list; /* requests */
1052 struct list_head mq_list; /* blk-mq requests */
1016 struct list_head cb_list; /* md requires an unplug callback */ 1053 struct list_head cb_list; /* md requires an unplug callback */
1017}; 1054};
1018#define BLK_MAX_REQUEST_COUNT 16 1055#define BLK_MAX_REQUEST_COUNT 16
@@ -1050,7 +1087,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1050{ 1087{
1051 struct blk_plug *plug = tsk->plug; 1088 struct blk_plug *plug = tsk->plug;
1052 1089
1053 return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); 1090 return plug &&
1091 (!list_empty(&plug->list) ||
1092 !list_empty(&plug->mq_list) ||
1093 !list_empty(&plug->cb_list));
1054} 1094}
1055 1095
1056/* 1096/*
@@ -1325,6 +1365,7 @@ static inline void put_dev_sector(Sector p)
1325 1365
1326struct work_struct; 1366struct work_struct;
1327int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1367int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1368int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
1328 1369
1329#ifdef CONFIG_BLK_CGROUP 1370#ifdef CONFIG_BLK_CGROUP
1330/* 1371/*
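The new blk-mq fields added above (mq_ops, mq_map, queue_ctx, queue_hw_ctx, nr_queues/nr_hw_queues) split submission into per-CPU software queues that fan in to a smaller set of hardware dispatch queues. A hedged sketch of the lookup those fields imply, not the in-tree helper itself; the function name is invented and the real mapping logic lives in block/blk-mq.c:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Illustrative sketch, not code from this patch: mq_map[] translates a
 * submitting CPU into an index into the hardware-context array. */
static struct blk_mq_hw_ctx *hyp_map_queue(struct request_queue *q,
					   unsigned int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}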
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7c2e030e72f1..afc1343df3c7 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -5,6 +5,7 @@
5#include <linux/relay.h> 5#include <linux/relay.h>
6#include <linux/compat.h> 6#include <linux/compat.h>
7#include <uapi/linux/blktrace_api.h> 7#include <uapi/linux/blktrace_api.h>
8#include <linux/list.h>
8 9
9#if defined(CONFIG_BLK_DEV_IO_TRACE) 10#if defined(CONFIG_BLK_DEV_IO_TRACE)
10 11
@@ -23,6 +24,7 @@ struct blk_trace {
23 struct dentry *dir; 24 struct dentry *dir;
24 struct dentry *dropped_file; 25 struct dentry *dropped_file;
25 struct dentry *msg_file; 26 struct dentry *msg_file;
27 struct list_head running_list;
26 atomic_t dropped; 28 atomic_t dropped;
27}; 29};
28 30
@@ -87,7 +89,7 @@ static inline int blk_trace_init_sysfs(struct device *dev)
87#ifdef CONFIG_COMPAT 89#ifdef CONFIG_COMPAT
88 90
89struct compat_blk_user_trace_setup { 91struct compat_blk_user_trace_setup {
90 char name[32]; 92 char name[BLKTRACE_BDEV_SIZE];
91 u16 act_mask; 93 u16 act_mask;
92 u32 buf_size; 94 u32 buf_size;
93 u32 buf_nr; 95 u32 buf_nr;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3561d305b1e0..39c1d9469677 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -612,11 +612,6 @@ struct cgroup_subsys {
612 int subsys_id; 612 int subsys_id;
613 int disabled; 613 int disabled;
614 int early_init; 614 int early_init;
615 /*
616 * True if this subsys uses ID. ID is not available before cgroup_init()
617 * (not available in early_init time.)
618 */
619 bool use_id;
620 615
621 /* 616 /*
622 * If %false, this subsystem is properly hierarchical - 617 * If %false, this subsystem is properly hierarchical -
@@ -642,9 +637,6 @@ struct cgroup_subsys {
642 */ 637 */
643 struct cgroupfs_root *root; 638 struct cgroupfs_root *root;
644 struct list_head sibling; 639 struct list_head sibling;
645 /* used when use_id == true */
646 struct idr idr;
647 spinlock_t id_lock;
648 640
649 /* list of cftype_sets */ 641 /* list of cftype_sets */
650 struct list_head cftsets; 642 struct list_head cftsets;
@@ -875,35 +867,6 @@ int css_scan_tasks(struct cgroup_subsys_state *css,
875int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); 867int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
876int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); 868int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
877 869
878/*
879 * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
880 * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
881 * CSS ID is assigned at cgroup allocation (create) automatically
882 * and removed when subsys calls free_css_id() function. This is because
883 * the lifetime of cgroup_subsys_state is subsys's matter.
884 *
885 * Looking up and scanning function should be called under rcu_read_lock().
886 * Taking cgroup_mutex is not necessary for following calls.
887 * But the css returned by this routine can be "not populated yet" or "being
888 * destroyed". The caller should check css and cgroup's status.
889 */
890
891/*
892 * Typically Called at ->destroy(), or somewhere the subsys frees
893 * cgroup_subsys_state.
894 */
895void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
896
897/* Find a cgroup_subsys_state which has given ID */
898
899struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
900
901/* Returns true if root is ancestor of cg */
902bool css_is_ancestor(struct cgroup_subsys_state *cg,
903 const struct cgroup_subsys_state *root);
904
905/* Get id and depth of css */
906unsigned short css_id(struct cgroup_subsys_state *css);
907struct cgroup_subsys_state *css_from_dir(struct dentry *dentry, 870struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
908 struct cgroup_subsys *ss); 871 struct cgroup_subsys *ss);
909 872
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 73bdb69f0c08..7e59253b8603 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -472,6 +472,7 @@ void of_clk_del_provider(struct device_node *np);
472struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 472struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
473 void *data); 473 void *data);
474struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); 474struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
475int of_clk_get_parent_count(struct device_node *np);
475const char *of_clk_get_parent_name(struct device_node *np, int index); 476const char *of_clk_get_parent_name(struct device_node *np, int index);
476 477
477void of_clk_init(const struct of_device_id *matches); 478void of_clk_init(const struct of_device_id *matches);
diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h
index 90c30dc3efc7..5138a90e018c 100644
--- a/include/linux/clk/mxs.h
+++ b/include/linux/clk/mxs.h
@@ -9,8 +9,6 @@
9#ifndef __LINUX_CLK_MXS_H 9#ifndef __LINUX_CLK_MXS_H
10#define __LINUX_CLK_MXS_H 10#define __LINUX_CLK_MXS_H
11 11
12int mx23_clocks_init(void);
13int mx28_clocks_init(void);
14int mxs_saif_clkmux_select(unsigned int clkmux); 12int mxs_saif_clkmux_select(unsigned int clkmux);
15 13
16#endif 14#endif
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644
index e074fdd5a236..000000000000
--- a/include/linux/clk/sunxi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright 2012 Maxime Ripard
3 *
4 * Maxime Ripard <maxime.ripard@free-electrons.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __LINUX_CLK_SUNXI_H_
18#define __LINUX_CLK_SUNXI_H_
19
20void __init sunxi_init_clocks(void);
21
22#endif
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0857922e8ad0..493aa021c7a9 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -60,6 +60,7 @@ enum clock_event_mode {
60 * Core shall set the interrupt affinity dynamically in broadcast mode 60 * Core shall set the interrupt affinity dynamically in broadcast mode
61 */ 61 */
62#define CLOCK_EVT_FEAT_DYNIRQ 0x000020 62#define CLOCK_EVT_FEAT_DYNIRQ 0x000020
63#define CLOCK_EVT_FEAT_PERCPU 0x000040
63 64
64/** 65/**
65 * struct clock_event_device - clock event device descriptor 66 * struct clock_event_device - clock event device descriptor
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index dbbf8aa7731b..67301a405712 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -292,6 +292,8 @@ extern void clocksource_resume(void);
292extern struct clocksource * __init __weak clocksource_default_clock(void); 292extern struct clocksource * __init __weak clocksource_default_clock(void);
293extern void clocksource_mark_unstable(struct clocksource *cs); 293extern void clocksource_mark_unstable(struct clocksource *cs);
294 294
295extern u64
296clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
295extern void 297extern void
296clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); 298clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
297 299
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
index 98e892ef6d5a..a0f9280421ec 100644
--- a/include/linux/cmdline-parser.h
+++ b/include/linux/cmdline-parser.h
@@ -8,6 +8,8 @@
8#define CMDLINEPARSEH 8#define CMDLINEPARSEH
9 9
10#include <linux/blkdev.h> 10#include <linux/blkdev.h>
11#include <linux/fs.h>
12#include <linux/slab.h>
11 13
12/* partition flags */ 14/* partition flags */
13#define PF_RDONLY 0x01 /* Device is read only */ 15#define PF_RDONLY 0x01 /* Device is read only */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 345da00a86e0..eb8a49d75ab3 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -41,14 +41,14 @@
41 COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) 41 COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
42 42
43#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ 43#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
44 asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ 44 asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
45 __attribute__((alias(__stringify(compat_SyS##name)))); \
45 static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ 46 static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
46 asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\ 47 asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
47 asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\ 48 asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
48 { \ 49 { \
49 return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \ 50 return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
50 } \ 51 } \
51 SYSCALL_ALIAS(compat_sys##name, compat_SyS##name); \
52 static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) 52 static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
53 53
54#ifndef compat_user_stack_pointer 54#ifndef compat_user_stack_pointer
@@ -362,7 +362,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
362long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, 362long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
363 unsigned long bitmap_size); 363 unsigned long bitmap_size);
364int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); 364int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
365int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); 365int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
366int get_compat_sigevent(struct sigevent *event, 366int get_compat_sigevent(struct sigevent *event,
367 const struct compat_sigevent __user *u_event); 367 const struct compat_sigevent __user *u_event);
368long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, 368long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 973ce10c40b6..dc1bd3dcf11f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -28,8 +28,6 @@
28 28
29#endif 29#endif
30 30
31#define uninitialized_var(x) x
32
33#ifndef __HAVE_BUILTIN_BSWAP16__ 31#ifndef __HAVE_BUILTIN_BSWAP16__
34/* icc has this, but it's called _bswap16 */ 32/* icc has this, but it's called _bswap16 */
35#define __HAVE_BUILTIN_BSWAP16__ 33#define __HAVE_BUILTIN_BSWAP16__
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 3cd574d5b19e..5d5aaae3af43 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -5,7 +5,7 @@
5 * (C) Copyright 2001 Linus Torvalds 5 * (C) Copyright 2001 Linus Torvalds
6 * 6 *
7 * Atomic wait-for-completion handler data structures. 7 * Atomic wait-for-completion handler data structures.
8 * See kernel/sched/core.c for details. 8 * See kernel/sched/completion.c for details.
9 */ 9 */
10 10
11#include <linux/wait.h> 11#include <linux/wait.h>
@@ -19,8 +19,8 @@
19 * 19 *
20 * See also: complete(), wait_for_completion() (and friends _timeout, 20 * See also: complete(), wait_for_completion() (and friends _timeout,
21 * _interruptible, _interruptible_timeout, and _killable), init_completion(), 21 * _interruptible, _interruptible_timeout, and _killable), init_completion(),
22 * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and 22 * reinit_completion(), and macros DECLARE_COMPLETION(),
23 * INIT_COMPLETION(). 23 * DECLARE_COMPLETION_ONSTACK().
24 */ 24 */
25struct completion { 25struct completion {
26 unsigned int done; 26 unsigned int done;
@@ -65,7 +65,7 @@ struct completion {
65 65
66/** 66/**
67 * init_completion - Initialize a dynamically allocated completion 67 * init_completion - Initialize a dynamically allocated completion
68 * @x: completion structure that is to be initialized 68 * @x: pointer to completion structure that is to be initialized
69 * 69 *
70 * This inline function will initialize a dynamically created completion 70 * This inline function will initialize a dynamically created completion
71 * structure. 71 * structure.
@@ -76,6 +76,18 @@ static inline void init_completion(struct completion *x)
76 init_waitqueue_head(&x->wait); 76 init_waitqueue_head(&x->wait);
77} 77}
78 78
79/**
80 * reinit_completion - reinitialize a completion structure
81 * @x: pointer to completion structure that is to be reinitialized
82 *
83 * This inline function should be used to reinitialize a completion structure so it can
84 * be reused. This is especially important after complete_all() is used.
85 */
86static inline void reinit_completion(struct completion *x)
87{
88 x->done = 0;
89}
90
79extern void wait_for_completion(struct completion *); 91extern void wait_for_completion(struct completion *);
80extern void wait_for_completion_io(struct completion *); 92extern void wait_for_completion_io(struct completion *);
81extern int wait_for_completion_interruptible(struct completion *x); 93extern int wait_for_completion_interruptible(struct completion *x);
@@ -94,14 +106,4 @@ extern bool completion_done(struct completion *x);
94extern void complete(struct completion *); 106extern void complete(struct completion *);
95extern void complete_all(struct completion *); 107extern void complete_all(struct completion *);
96 108
97/**
98 * INIT_COMPLETION - reinitialize a completion structure
99 * @x: completion structure to be reinitialized
100 *
101 * This macro should be used to reinitialize a completion structure so it can
102 * be reused. This is especially important after complete_all() is used.
103 */
104#define INIT_COMPLETION(x) ((x).done = 0)
105
106
107#endif 109#endif
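With INIT_COMPLETION() removed in favour of the reinit_completion() inline above, reusable completions are now reset through a function call on a pointer rather than a macro on the object. A short usage sketch, with the surrounding driver context invented (only the completion API comes from this header):

#include <linux/completion.h>

struct hyp_dev {
	struct completion done;	/* signalled from the IRQ handler */
};

static int hyp_do_transfer(struct hyp_dev *d)
{
	/* previously: INIT_COMPLETION(d->done); */
	reinit_completion(&d->done);

	/* ... start the hardware; the IRQ path calls complete(&d->done) ... */

	wait_for_completion(&d->done);
	return 0;
}

init_completion() is still used once when the object is first set up; reinit_completion() only resets the done counter before each reuse.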
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index a98f1ca60407..d016a121a8c4 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -10,12 +10,14 @@
10 * These are the only things you should do on a core-file: use only these 10 * These are the only things you should do on a core-file: use only these
11 * functions to write out all the necessary info. 11 * functions to write out all the necessary info.
12 */ 12 */
13extern int dump_write(struct file *file, const void *addr, int nr); 13struct coredump_params;
14extern int dump_seek(struct file *file, loff_t off); 14extern int dump_skip(struct coredump_params *cprm, size_t nr);
15extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
16extern int dump_align(struct coredump_params *cprm, int align);
15#ifdef CONFIG_COREDUMP 17#ifdef CONFIG_COREDUMP
16extern void do_coredump(siginfo_t *siginfo); 18extern void do_coredump(const siginfo_t *siginfo);
17#else 19#else
18static inline void do_coredump(siginfo_t *siginfo) {} 20static inline void do_coredump(const siginfo_t *siginfo) {}
19#endif 21#endif
20 22
21#endif /* _LINUX_COREDUMP_H */ 23#endif /* _LINUX_COREDUMP_H */
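dump_write()/dump_seek() are replaced above by helpers that take the coredump_params directly. A loose sketch of how a binary-format handler might emit a header with the new interface; the header struct and function are invented, and the nonzero-on-success return convention is an assumption carried over from the old helpers rather than something stated in this hunk:

#include <linux/coredump.h>
#include <linux/types.h>

struct hyp_core_header {
	u32 magic;
	u32 nsegments;
};

/* Sketch only: emit a hypothetical header, then pad what follows to an
 * 8-byte boundary, assuming dump_emit()/dump_align() return nonzero on
 * success. */
static int hyp_write_core_header(struct coredump_params *cprm,
				 const struct hyp_core_header *hdr)
{
	if (!dump_emit(cprm, hdr, sizeof(*hdr)))
		return 0;
	return dump_align(cprm, 8);
}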
diff --git a/include/linux/cper.h b/include/linux/cper.h
index c23049496531..2fc0ec3d89cc 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -218,8 +218,8 @@ enum {
218#define CPER_PROC_VALID_IP 0x1000 218#define CPER_PROC_VALID_IP 0x1000
219 219
220#define CPER_MEM_VALID_ERROR_STATUS 0x0001 220#define CPER_MEM_VALID_ERROR_STATUS 0x0001
221#define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002 221#define CPER_MEM_VALID_PA 0x0002
222#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004 222#define CPER_MEM_VALID_PA_MASK 0x0004
223#define CPER_MEM_VALID_NODE 0x0008 223#define CPER_MEM_VALID_NODE 0x0008
224#define CPER_MEM_VALID_CARD 0x0010 224#define CPER_MEM_VALID_CARD 0x0010
225#define CPER_MEM_VALID_MODULE 0x0020 225#define CPER_MEM_VALID_MODULE 0x0020
@@ -232,6 +232,9 @@ enum {
232#define CPER_MEM_VALID_RESPONDER_ID 0x1000 232#define CPER_MEM_VALID_RESPONDER_ID 0x1000
233#define CPER_MEM_VALID_TARGET_ID 0x2000 233#define CPER_MEM_VALID_TARGET_ID 0x2000
234#define CPER_MEM_VALID_ERROR_TYPE 0x4000 234#define CPER_MEM_VALID_ERROR_TYPE 0x4000
235#define CPER_MEM_VALID_RANK_NUMBER 0x8000
236#define CPER_MEM_VALID_CARD_HANDLE 0x10000
237#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
235 238
236#define CPER_PCIE_VALID_PORT_TYPE 0x0001 239#define CPER_PCIE_VALID_PORT_TYPE 0x0001
237#define CPER_PCIE_VALID_VERSION 0x0002 240#define CPER_PCIE_VALID_VERSION 0x0002
@@ -347,6 +350,10 @@ struct cper_sec_mem_err {
347 __u64 responder_id; 350 __u64 responder_id;
348 __u64 target_id; 351 __u64 target_id;
349 __u8 error_type; 352 __u8 error_type;
353 __u8 reserved;
354 __u16 rank;
355 __u16 mem_array_handle; /* card handle in UEFI 2.4 */
356 __u16 mem_dev_handle; /* module handle in UEFI 2.4 */
350}; 357};
351 358
352struct cper_sec_pcie { 359struct cper_sec_pcie {
@@ -389,6 +396,6 @@ struct cper_sec_pcie {
389 396
390u64 cper_next_record_id(void); 397u64 cper_next_record_id(void);
391void cper_print_bits(const char *prefix, unsigned int bits, 398void cper_print_bits(const char *prefix, unsigned int bits,
392 const char *strs[], unsigned int strs_size); 399 const char * const strs[], unsigned int strs_size);
393 400
394#endif 401#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 801ff9e73679..03e235ad1bba 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
18#include <linux/cpumask.h> 18#include <linux/cpumask.h>
19 19
20struct device; 20struct device;
21struct device_node;
21 22
22struct cpu { 23struct cpu {
23 int node_id; /* The node which contains the CPU */ 24 int node_id; /* The node which contains the CPU */
@@ -29,6 +30,8 @@ extern int register_cpu(struct cpu *cpu, int num);
29extern struct device *get_cpu_device(unsigned cpu); 30extern struct device *get_cpu_device(unsigned cpu);
30extern bool cpu_is_hotpluggable(unsigned cpu); 31extern bool cpu_is_hotpluggable(unsigned cpu);
31extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id); 32extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
33extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
34 int cpu, unsigned int *thread);
32 35
33extern int cpu_add_dev_attr(struct device_attribute *attr); 36extern int cpu_add_dev_attr(struct device_attribute *attr);
34extern void cpu_remove_dev_attr(struct device_attribute *attr); 37extern void cpu_remove_dev_attr(struct device_attribute *attr);
@@ -185,19 +188,6 @@ extern void cpu_hotplug_enable(void);
185void clear_tasks_mm_cpumask(int cpu); 188void clear_tasks_mm_cpumask(int cpu);
186int cpu_down(unsigned int cpu); 189int cpu_down(unsigned int cpu);
187 190
188#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
189extern void cpu_hotplug_driver_lock(void);
190extern void cpu_hotplug_driver_unlock(void);
191#else
192static inline void cpu_hotplug_driver_lock(void)
193{
194}
195
196static inline void cpu_hotplug_driver_unlock(void)
197{
198}
199#endif
200
201#else /* CONFIG_HOTPLUG_CPU */ 191#else /* CONFIG_HOTPLUG_CPU */
202 192
203static inline void cpu_hotplug_begin(void) {} 193static inline void cpu_hotplug_begin(void) {}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fcabc42d66ab..dc196bbcf227 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -85,6 +85,20 @@ struct cpufreq_policy {
85 struct list_head policy_list; 85 struct list_head policy_list;
86 struct kobject kobj; 86 struct kobject kobj;
87 struct completion kobj_unregister; 87 struct completion kobj_unregister;
88
89 /*
90 * The rules for this semaphore:
91 * - Any routine that wants to read from the policy structure will
92 * do a down_read on this semaphore.
93 * - Any routine that will write to the policy structure and/or may take away
94 * the policy altogether (eg. CPU hotplug), will hold this lock in write
95 * mode before doing so.
96 *
97 * Additional rules:
98 * - Lock should not be held across
99 * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
100 */
101 struct rw_semaphore rwsem;
88}; 102};
89 103
90/* Only for ACPI */ 104/* Only for ACPI */
@@ -93,8 +107,16 @@ struct cpufreq_policy {
93#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ 107#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
94#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ 108#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
95 109
110#ifdef CONFIG_CPU_FREQ
96struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); 111struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
97void cpufreq_cpu_put(struct cpufreq_policy *policy); 112void cpufreq_cpu_put(struct cpufreq_policy *policy);
113#else
114static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
115{
116 return NULL;
117}
118static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
119#endif
98 120
99static inline bool policy_is_shared(struct cpufreq_policy *policy) 121static inline bool policy_is_shared(struct cpufreq_policy *policy)
100{ 122{
@@ -180,13 +202,6 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
180struct cpufreq_driver { 202struct cpufreq_driver {
181 char name[CPUFREQ_NAME_LEN]; 203 char name[CPUFREQ_NAME_LEN];
182 u8 flags; 204 u8 flags;
183 /*
184 * This should be set by platforms having multiple clock-domains, i.e.
185 * supporting multiple policies. With this sysfs directories of governor
186 * would be created in cpu/cpu<num>/cpufreq/ directory and so they can
187 * use the same governor with different tunables for different clusters.
188 */
189 bool have_governor_per_policy;
190 205
191 /* needed by all drivers */ 206 /* needed by all drivers */
192 int (*init) (struct cpufreq_policy *policy); 207 int (*init) (struct cpufreq_policy *policy);
@@ -194,9 +209,11 @@ struct cpufreq_driver {
194 209
195 /* define one out of two */ 210 /* define one out of two */
196 int (*setpolicy) (struct cpufreq_policy *policy); 211 int (*setpolicy) (struct cpufreq_policy *policy);
197 int (*target) (struct cpufreq_policy *policy, 212 int (*target) (struct cpufreq_policy *policy, /* Deprecated */
198 unsigned int target_freq, 213 unsigned int target_freq,
199 unsigned int relation); 214 unsigned int relation);
215 int (*target_index) (struct cpufreq_policy *policy,
216 unsigned int index);
200 217
201 /* should be defined, if possible */ 218 /* should be defined, if possible */
202 unsigned int (*get) (unsigned int cpu); 219 unsigned int (*get) (unsigned int cpu);
@@ -211,13 +228,29 @@ struct cpufreq_driver {
211}; 228};
212 229
213/* flags */ 230/* flags */
214#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if 231#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
215 * all ->init() calls failed */ 232 all ->init() calls failed */
216#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel 233#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
217 * "constants" aren't affected by 234 kernel "constants" aren't
218 * frequency transitions */ 235 affected by frequency
219#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed 236 transitions */
220 * mismatches */ 237#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
238 speed mismatches */
239
240/*
241 * This should be set by platforms having multiple clock-domains, i.e.
242 * supporting multiple policies. With this sysfs directories of governor would
243 * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
244 * governor with different tunables for different clusters.
245 */
246#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
247
248/*
249 * Driver will do POSTCHANGE notifications from outside of their ->target()
250 * routine and so must set cpufreq_driver->flags with this flag, so that core
251 * can handle them specially.
252 */
253#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
221 254
222int cpufreq_register_driver(struct cpufreq_driver *driver_data); 255int cpufreq_register_driver(struct cpufreq_driver *driver_data);
223int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); 256int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -240,6 +273,13 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
240 return; 273 return;
241} 274}
242 275
276static inline void
277cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
278{
279 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
280 policy->cpuinfo.max_freq);
281}
282
243/********************************************************************* 283/*********************************************************************
244 * CPUFREQ NOTIFIER INTERFACE * 284 * CPUFREQ NOTIFIER INTERFACE *
245 *********************************************************************/ 285 *********************************************************************/
@@ -392,6 +432,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
392 432
393int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, 433int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
394 struct cpufreq_frequency_table *table); 434 struct cpufreq_frequency_table *table);
435int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
395 436
396int cpufreq_frequency_table_target(struct cpufreq_policy *policy, 437int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
397 struct cpufreq_frequency_table *table, 438 struct cpufreq_frequency_table *table,
@@ -407,8 +448,20 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
407 448
408/* the following are really really optional */ 449/* the following are really really optional */
409extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; 450extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
451extern struct freq_attr *cpufreq_generic_attr[];
410void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, 452void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
411 unsigned int cpu); 453 unsigned int cpu);
412void cpufreq_frequency_table_put_attr(unsigned int cpu); 454void cpufreq_frequency_table_put_attr(unsigned int cpu);
455int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
456 struct cpufreq_frequency_table *table);
457
458int cpufreq_generic_init(struct cpufreq_policy *policy,
459 struct cpufreq_frequency_table *table,
460 unsigned int transition_latency);
461static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
462{
463 cpufreq_frequency_table_put_attr(policy->cpu);
464 return 0;
465}
413 466
414#endif /* _LINUX_CPUFREQ_H */ 467#endif /* _LINUX_CPUFREQ_H */
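Taken together, the cpufreq.h changes deprecate ->target() in favour of ->target_index(), fold have_governor_per_policy into the flags word, and add the cpufreq_generic_* helpers. A hedged skeleton of a frequency-table driver against this interface follows; all hyp_ names and table values are invented, and fields not shown in this hunk (such as .verify, .attr and .driver_data) are recalled from the 3.13-era header rather than quoted from it:

#include <linux/cpufreq.h>
#include <linux/module.h>

static struct cpufreq_frequency_table hyp_freq_table[] = {
	{ .driver_data = 0, .frequency = 500000 },	/* kHz */
	{ .driver_data = 1, .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int hyp_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* program the clock for hyp_freq_table[index].frequency here */
	return 0;
}

static int hyp_cpu_init(struct cpufreq_policy *policy)
{
	/* validates the table, exposes it in sysfs, sets the latency */
	return cpufreq_generic_init(policy, hyp_freq_table, 300000 /* ns */);
}

static struct cpufreq_driver hyp_cpufreq_driver = {
	.name		= "hyp-cpufreq",
	.flags		= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= hyp_target_index,
	.init		= hyp_cpu_init,
	.exit		= cpufreq_generic_exit,
	.attr		= cpufreq_generic_attr,
};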
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 781addc66f03..50fcbb0ac4e7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -114,7 +114,7 @@ struct cpuidle_driver {
114 int safe_state_index; 114 int safe_state_index;
115 115
116 /* the driver handles the cpus in cpumask */ 116 /* the driver handles the cpus in cpumask */
117 struct cpumask *cpumask; 117 struct cpumask *cpumask;
118}; 118};
119 119
120#ifdef CONFIG_CPU_IDLE 120#ifdef CONFIG_CPU_IDLE
@@ -195,16 +195,10 @@ struct cpuidle_governor {
195}; 195};
196 196
197#ifdef CONFIG_CPU_IDLE 197#ifdef CONFIG_CPU_IDLE
198
199extern int cpuidle_register_governor(struct cpuidle_governor *gov); 198extern int cpuidle_register_governor(struct cpuidle_governor *gov);
200extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
201
202#else 199#else
203
204static inline int cpuidle_register_governor(struct cpuidle_governor *gov) 200static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
205{return 0;} 201{return 0;}
206static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
207
208#endif 202#endif
209 203
210#ifdef CONFIG_ARCH_HAS_CPU_RELAX 204#ifdef CONFIG_ARCH_HAS_CPU_RELAX
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index cc1b01cf2035..3fe661fe96d1 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -110,10 +110,14 @@ static inline bool put_mems_allowed(unsigned int seq)
110 110
111static inline void set_mems_allowed(nodemask_t nodemask) 111static inline void set_mems_allowed(nodemask_t nodemask)
112{ 112{
113 unsigned long flags;
114
113 task_lock(current); 115 task_lock(current);
116 local_irq_save(flags);
114 write_seqcount_begin(&current->mems_allowed_seq); 117 write_seqcount_begin(&current->mems_allowed_seq);
115 current->mems_allowed = nodemask; 118 current->mems_allowed = nodemask;
116 write_seqcount_end(&current->mems_allowed_seq); 119 write_seqcount_end(&current->mems_allowed_seq);
120 local_irq_restore(flags);
117 task_unlock(current); 121 task_unlock(current);
118} 122}
119 123
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index fe68a5a98583..7032518f8542 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -6,6 +6,8 @@
6#include <linux/proc_fs.h> 6#include <linux/proc_fs.h>
7#include <linux/elf.h> 7#include <linux/elf.h>
8 8
9#include <asm/pgtable.h> /* for pgprot_t */
10
9#define ELFCORE_ADDR_MAX (-1ULL) 11#define ELFCORE_ADDR_MAX (-1ULL)
10#define ELFCORE_ADDR_ERR (-2ULL) 12#define ELFCORE_ADDR_ERR (-2ULL)
11 13
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 68267b64bb98..7d275c4fc011 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -11,8 +11,48 @@
11extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len); 11extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
12extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); 12extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
13 13
14/**
15 * crc32_le_combine - Combine two crc32 check values into one. For two
16 * sequences of bytes, seq1 and seq2 with lengths len1
17 * and len2, crc32_le() check values were calculated
18 * for each, crc1 and crc2.
19 *
20 * @crc1: crc32 of the first block
21 * @crc2: crc32 of the second block
22 * @len2: length of the second block
23 *
24 * Return: The crc32_le() check value of seq1 and seq2 concatenated,
25 * requiring only crc1, crc2, and len2. Note: If seq_full denotes
26 * the concatenated memory area of seq1 with seq2, and crc_full
27 * the crc32_le() value of seq_full, then crc_full ==
28 * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
29 * with the same initializer as crc1, and crc2 seed was 0. See
30 * also crc32_combine_test().
31 */
32extern u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2);
33
14extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len); 34extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len);
15 35
36/**
37 * __crc32c_le_combine - Combine two crc32c check values into one. For two
38 * sequences of bytes, seq1 and seq2 with lengths len1
39 * and len2, __crc32c_le() check values were calculated
40 * for each, crc1 and crc2.
41 *
42 * @crc1: crc32c of the first block
43 * @crc2: crc32c of the second block
44 * @len2: length of the second block
45 *
46 * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
47 * requiring only crc1, crc2, and len2. Note: If seq_full denotes
48 * the concatenated memory area of seq1 with seq2, and crc_full
49 * the __crc32c_le() value of seq_full, then crc_full ==
50 * __crc32c_le_combine(crc1, crc2, len2) when crc_full was
51 * seeded with the same initializer as crc1, and crc2 seed
52 * was 0. See also crc32c_combine_test().
53 */
54extern u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2);
55
16#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) 56#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
17 57
18/* 58/*
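The combine helpers added above let two CRCs computed over separate buffers be merged without touching the data again. A small kernel-style sketch of the identity spelled out in the crc32_le_combine() comment; the function and buffers are invented, only crc32_le() and crc32_le_combine() come from this header:

#include <linux/crc32.h>
#include <linux/string.h>
#include <linux/types.h>

/* Returns true when combining per-buffer CRCs matches the CRC of the
 * concatenation, per the seeding rules documented above (crc1 seeded like
 * the full CRC, crc2 seeded with 0). */
static bool hyp_crc32_combine_matches(const u8 *seq1, size_t len1,
				      const u8 *seq2, size_t len2,
				      u8 *scratch /* >= len1 + len2 bytes */)
{
	u32 crc1 = crc32_le(~0U, seq1, len1);
	u32 crc2 = crc32_le(0, seq2, len2);
	u32 crc_full;

	memcpy(scratch, seq1, len1);
	memcpy(scratch + len1, seq2, len2);
	crc_full = crc32_le(~0U, scratch, len1 + len2);

	return crc_full == crc32_le_combine(crc1, crc2, len2);
}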
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 59066e0b4ff1..bf72e9ac6de0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -29,8 +29,10 @@ struct vfsmount;
29/* The hash is always the low bits of hash_len */ 29/* The hash is always the low bits of hash_len */
30#ifdef __LITTLE_ENDIAN 30#ifdef __LITTLE_ENDIAN
31 #define HASH_LEN_DECLARE u32 hash; u32 len; 31 #define HASH_LEN_DECLARE u32 hash; u32 len;
32 #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
32#else 33#else
33 #define HASH_LEN_DECLARE u32 len; u32 hash; 34 #define HASH_LEN_DECLARE u32 len; u32 hash;
35 #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
34#endif 36#endif
35 37
36/* 38/*
@@ -169,13 +171,13 @@ struct dentry_operations {
169 */ 171 */
170 172
171/* d_flags entries */ 173/* d_flags entries */
172#define DCACHE_OP_HASH 0x0001 174#define DCACHE_OP_HASH 0x00000001
173#define DCACHE_OP_COMPARE 0x0002 175#define DCACHE_OP_COMPARE 0x00000002
174#define DCACHE_OP_REVALIDATE 0x0004 176#define DCACHE_OP_REVALIDATE 0x00000004
175#define DCACHE_OP_DELETE 0x0008 177#define DCACHE_OP_DELETE 0x00000008
176#define DCACHE_OP_PRUNE 0x0010 178#define DCACHE_OP_PRUNE 0x00000010
177 179
178#define DCACHE_DISCONNECTED 0x0020 180#define DCACHE_DISCONNECTED 0x00000020
179 /* This dentry is possibly not currently connected to the dcache tree, in 181 /* This dentry is possibly not currently connected to the dcache tree, in
180 * which case its parent will either be itself, or will have this flag as 182 * which case its parent will either be itself, or will have this flag as
181 * well. nfsd will not use a dentry with this bit set, but will first 183 * well. nfsd will not use a dentry with this bit set, but will first
@@ -186,30 +188,38 @@ struct dentry_operations {
186 * dentry into place and return that dentry rather than the passed one, 188 * dentry into place and return that dentry rather than the passed one,
187 * typically using d_splice_alias. */ 189 * typically using d_splice_alias. */
188 190
189#define DCACHE_REFERENCED 0x0040 /* Recently used, don't discard. */ 191#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
190#define DCACHE_RCUACCESS 0x0080 /* Entry has ever been RCU-visible */ 192#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
191 193
192#define DCACHE_CANT_MOUNT 0x0100 194#define DCACHE_CANT_MOUNT 0x00000100
193#define DCACHE_GENOCIDE 0x0200 195#define DCACHE_GENOCIDE 0x00000200
194#define DCACHE_SHRINK_LIST 0x0400 196#define DCACHE_SHRINK_LIST 0x00000400
195 197
196#define DCACHE_OP_WEAK_REVALIDATE 0x0800 198#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
197 199
198#define DCACHE_NFSFS_RENAMED 0x1000 200#define DCACHE_NFSFS_RENAMED 0x00001000
199 /* this dentry has been "silly renamed" and has to be deleted on the last 201 /* this dentry has been "silly renamed" and has to be deleted on the last
200 * dput() */ 202 * dput() */
201#define DCACHE_COOKIE 0x2000 /* For use by dcookie subsystem */ 203#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
202#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x4000 204#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
203 /* Parent inode is watched by some fsnotify listener */ 205 /* Parent inode is watched by some fsnotify listener */
204 206
205#define DCACHE_MOUNTED 0x10000 /* is a mountpoint */ 207#define DCACHE_DENTRY_KILLED 0x00008000
206#define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */ 208
207#define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */ 209#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
210#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
211#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
208#define DCACHE_MANAGED_DENTRY \ 212#define DCACHE_MANAGED_DENTRY \
209 (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) 213 (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
210 214
211#define DCACHE_LRU_LIST 0x80000 215#define DCACHE_LRU_LIST 0x00080000
212#define DCACHE_DENTRY_KILLED 0x100000 216
217#define DCACHE_ENTRY_TYPE 0x00700000
218#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry */
219#define DCACHE_DIRECTORY_TYPE 0x00100000 /* Normal directory */
220#define DCACHE_AUTODIR_TYPE 0x00200000 /* Lookupless directory (presumed automount) */
221#define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */
222#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */
213 223
214extern seqlock_t rename_lock; 224extern seqlock_t rename_lock;
215 225
@@ -224,6 +234,7 @@ static inline int dname_external(const struct dentry *dentry)
224extern void d_instantiate(struct dentry *, struct inode *); 234extern void d_instantiate(struct dentry *, struct inode *);
225extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); 235extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
226extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); 236extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
237extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
227extern void __d_drop(struct dentry *dentry); 238extern void __d_drop(struct dentry *dentry);
228extern void d_drop(struct dentry *dentry); 239extern void d_drop(struct dentry *dentry);
229extern void d_delete(struct dentry *); 240extern void d_delete(struct dentry *);
@@ -393,6 +404,61 @@ static inline bool d_mountpoint(const struct dentry *dentry)
393 return dentry->d_flags & DCACHE_MOUNTED; 404 return dentry->d_flags & DCACHE_MOUNTED;
394} 405}
395 406
407/*
408 * Directory cache entry type accessor functions.
409 */
410static inline void __d_set_type(struct dentry *dentry, unsigned type)
411{
412 dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
413}
414
415static inline void __d_clear_type(struct dentry *dentry)
416{
417 __d_set_type(dentry, DCACHE_MISS_TYPE);
418}
419
420static inline void d_set_type(struct dentry *dentry, unsigned type)
421{
422 spin_lock(&dentry->d_lock);
423 __d_set_type(dentry, type);
424 spin_unlock(&dentry->d_lock);
425}
426
427static inline unsigned __d_entry_type(const struct dentry *dentry)
428{
429 return dentry->d_flags & DCACHE_ENTRY_TYPE;
430}
431
432static inline bool d_is_directory(const struct dentry *dentry)
433{
434 return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
435}
436
437static inline bool d_is_autodir(const struct dentry *dentry)
438{
439 return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
440}
441
442static inline bool d_is_symlink(const struct dentry *dentry)
443{
444 return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
445}
446
447static inline bool d_is_file(const struct dentry *dentry)
448{
449 return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
450}
451
452static inline bool d_is_negative(const struct dentry *dentry)
453{
454 return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
455}
456
457static inline bool d_is_positive(const struct dentry *dentry)
458{
459 return !d_is_negative(dentry);
460}
461
396extern int sysctl_vfs_cache_pressure; 462extern int sysctl_vfs_cache_pressure;
397 463
398static inline unsigned long vfs_pressure_ratio(unsigned long val) 464static inline unsigned long vfs_pressure_ratio(unsigned long val)
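The DCACHE_ENTRY_TYPE bits and d_is_*() accessors added above cache the dentry's kind in d_flags, so callers can test it without dereferencing d_inode. A short sketch of the intended call pattern; hyp_validate_dir() is invented, the helpers are from this hunk:

#include <linux/dcache.h>
#include <linux/errno.h>

/* Checks read only dentry->d_flags, never dentry->d_inode. */
static int hyp_validate_dir(const struct dentry *dentry)
{
	if (d_is_negative(dentry))
		return -ENOENT;
	if (d_is_symlink(dentry))
		return -ELOOP;
	if (!d_is_directory(dentry) && !d_is_autodir(dentry))
		return -ENOTDIR;
	return 0;
}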
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 263489d0788d..4d0b4d1aa132 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -206,6 +206,12 @@ static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mod
206 return ERR_PTR(-ENODEV); 206 return ERR_PTR(-ENODEV);
207} 207}
208 208
209static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
210 struct dentry *parent, atomic_t *value)
211{
212 return ERR_PTR(-ENODEV);
213}
214
209static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, 215static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
210 struct dentry *parent, 216 struct dentry *parent,
211 u32 *value) 217 u32 *value)
@@ -227,6 +233,12 @@ static inline struct dentry *debugfs_create_regset32(const char *name,
227 return ERR_PTR(-ENODEV); 233 return ERR_PTR(-ENODEV);
228} 234}
229 235
236static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
237 int nregs, void __iomem *base, char *prefix)
238{
239 return 0;
240}
241
230static inline bool debugfs_initialized(void) 242static inline bool debugfs_initialized(void)
231{ 243{
232 return false; 244 return false;
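The stubs added above mean debugfs_create_atomic_t() and debugfs_print_regs32() can be called without CONFIG_DEBUG_FS ifdefs. A hedged example of the former; the directory and counter names are invented, and the real prototype lives in the CONFIG_DEBUG_FS branch of this header:

#include <linux/debugfs.h>
#include <linux/atomic.h>
#include <linux/err.h>

static atomic_t hyp_error_count = ATOMIC_INIT(0);

static struct dentry *hyp_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("hyp-driver", NULL);

	/* expose the counter read-only; with debugfs disabled the stubs
	 * above make this path a harmless no-op */
	if (!IS_ERR_OR_NULL(dir))
		debugfs_create_atomic_t("error_count", 0444, dir,
					&hyp_error_count);
	return dir;
}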
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 5f1ab92107e6..d48dc00232a4 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -15,7 +15,7 @@
15 15
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/opp.h> 18#include <linux/pm_opp.h>
19 19
20#define DEVFREQ_NAME_LEN 16 20#define DEVFREQ_NAME_LEN 16
21 21
@@ -168,7 +168,7 @@ struct devfreq {
168 unsigned long max_freq; 168 unsigned long max_freq;
169 bool stop_polling; 169 bool stop_polling;
170 170
171 /* information for device freqeuncy transition */ 171 /* information for device frequency transition */
172 unsigned int total_trans; 172 unsigned int total_trans;
173 unsigned int *trans_table; 173 unsigned int *trans_table;
174 unsigned long *time_in_state; 174 unsigned long *time_in_state;
@@ -187,7 +187,7 @@ extern int devfreq_suspend_device(struct devfreq *devfreq);
187extern int devfreq_resume_device(struct devfreq *devfreq); 187extern int devfreq_resume_device(struct devfreq *devfreq);
188 188
189/* Helper functions for devfreq user device driver with OPP. */ 189/* Helper functions for devfreq user device driver with OPP. */
190extern struct opp *devfreq_recommended_opp(struct device *dev, 190extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
191 unsigned long *freq, u32 flags); 191 unsigned long *freq, u32 flags);
192extern int devfreq_register_opp_notifier(struct device *dev, 192extern int devfreq_register_opp_notifier(struct device *dev,
193 struct devfreq *devfreq); 193 struct devfreq *devfreq);
@@ -238,7 +238,7 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
238 return 0; 238 return 0;
239} 239}
240 240
241static inline struct opp *devfreq_recommended_opp(struct device *dev, 241static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
242 unsigned long *freq, u32 flags) 242 unsigned long *freq, u32 flags)
243{ 243{
244 return ERR_PTR(-EINVAL); 244 return ERR_PTR(-EINVAL);
diff --git a/include/linux/device.h b/include/linux/device.h
index 2a9d6ed59579..952b01033c32 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -26,6 +26,7 @@
26#include <linux/atomic.h> 26#include <linux/atomic.h>
27#include <linux/ratelimit.h> 27#include <linux/ratelimit.h>
28#include <linux/uidgid.h> 28#include <linux/uidgid.h>
29#include <linux/gfp.h>
29#include <asm/device.h> 30#include <asm/device.h>
30 31
31struct device; 32struct device;
@@ -63,9 +64,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
63 * @name: The name of the bus. 64 * @name: The name of the bus.
64 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). 65 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
65 * @dev_root: Default device to use as the parent. 66 * @dev_root: Default device to use as the parent.
66 * @bus_attrs: Default attributes of the bus.
67 * @dev_attrs: Default attributes of the devices on the bus. 67 * @dev_attrs: Default attributes of the devices on the bus.
68 * @drv_attrs: Default attributes of the device drivers on the bus.
69 * @bus_groups: Default attributes of the bus. 68 * @bus_groups: Default attributes of the bus.
70 * @dev_groups: Default attributes of the devices on the bus. 69 * @dev_groups: Default attributes of the devices on the bus.
71 * @drv_groups: Default attributes of the device drivers on the bus. 70 * @drv_groups: Default attributes of the device drivers on the bus.
@@ -106,9 +105,7 @@ struct bus_type {
106 const char *name; 105 const char *name;
107 const char *dev_name; 106 const char *dev_name;
108 struct device *dev_root; 107 struct device *dev_root;
109 struct bus_attribute *bus_attrs; /* use bus_groups instead */
110 struct device_attribute *dev_attrs; /* use dev_groups instead */ 108 struct device_attribute *dev_attrs; /* use dev_groups instead */
111 struct driver_attribute *drv_attrs; /* use drv_groups instead */
112 const struct attribute_group **bus_groups; 109 const struct attribute_group **bus_groups;
113 const struct attribute_group **dev_groups; 110 const struct attribute_group **dev_groups;
114 const struct attribute_group **drv_groups; 111 const struct attribute_group **drv_groups;
@@ -329,8 +326,6 @@ int subsys_virtual_register(struct bus_type *subsys,
329 * @owner: The module owner. 326 * @owner: The module owner.
330 * @class_attrs: Default attributes of this class. 327 * @class_attrs: Default attributes of this class.
331 * @dev_groups: Default attributes of the devices that belong to the class. 328 * @dev_groups: Default attributes of the devices that belong to the class.
332 * @dev_attrs: Default attributes of the devices belong to the class.
333 * @dev_bin_attrs: Default binary attributes of the devices belong to the class.
334 * @dev_kobj: The kobject that represents this class and links it into the hierarchy. 329 * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
335 * @dev_uevent: Called when a device is added, removed from this class, or a 330 * @dev_uevent: Called when a device is added, removed from this class, or a
336 * few other things that generate uevents to add the environment 331 * few other things that generate uevents to add the environment
@@ -358,9 +353,7 @@ struct class {
358 struct module *owner; 353 struct module *owner;
359 354
360 struct class_attribute *class_attrs; 355 struct class_attribute *class_attrs;
361 struct device_attribute *dev_attrs; /* use dev_groups instead */
362 const struct attribute_group **dev_groups; 356 const struct attribute_group **dev_groups;
363 struct bin_attribute *dev_bin_attrs;
364 struct kobject *dev_kobj; 357 struct kobject *dev_kobj;
365 358
366 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); 359 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
@@ -427,8 +420,6 @@ struct class_attribute {
427 char *buf); 420 char *buf);
428 ssize_t (*store)(struct class *class, struct class_attribute *attr, 421 ssize_t (*store)(struct class *class, struct class_attribute *attr,
429 const char *buf, size_t count); 422 const char *buf, size_t count);
430 const void *(*namespace)(struct class *class,
431 const struct class_attribute *attr);
432}; 423};
433 424
434#define CLASS_ATTR(_name, _mode, _show, _store) \ 425#define CLASS_ATTR(_name, _mode, _show, _store) \
@@ -438,10 +429,24 @@ struct class_attribute {
438#define CLASS_ATTR_RO(_name) \ 429#define CLASS_ATTR_RO(_name) \
439 struct class_attribute class_attr_##_name = __ATTR_RO(_name) 430 struct class_attribute class_attr_##_name = __ATTR_RO(_name)
440 431
441extern int __must_check class_create_file(struct class *class, 432extern int __must_check class_create_file_ns(struct class *class,
442 const struct class_attribute *attr); 433 const struct class_attribute *attr,
443extern void class_remove_file(struct class *class, 434 const void *ns);
444 const struct class_attribute *attr); 435extern void class_remove_file_ns(struct class *class,
436 const struct class_attribute *attr,
437 const void *ns);
438
439static inline int __must_check class_create_file(struct class *class,
440 const struct class_attribute *attr)
441{
442 return class_create_file_ns(class, attr, NULL);
443}
444
445static inline void class_remove_file(struct class *class,
446 const struct class_attribute *attr)
447{
448 return class_remove_file_ns(class, attr, NULL);
449}
445 450
446/* Simple class attribute that is just a static string */ 451/* Simple class attribute that is just a static string */
447struct class_attribute_string { 452struct class_attribute_string {
@@ -602,8 +607,24 @@ extern void devres_close_group(struct device *dev, void *id);
602extern void devres_remove_group(struct device *dev, void *id); 607extern void devres_remove_group(struct device *dev, void *id);
603extern int devres_release_group(struct device *dev, void *id); 608extern int devres_release_group(struct device *dev, void *id);
604 609
605/* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */ 610/* managed devm_k.alloc/kfree for device drivers */
606extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); 611extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
612static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
613{
614 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
615}
616static inline void *devm_kmalloc_array(struct device *dev,
617 size_t n, size_t size, gfp_t flags)
618{
619 if (size != 0 && n > SIZE_MAX / size)
620 return NULL;
621 return devm_kmalloc(dev, n * size, flags);
622}
623static inline void *devm_kcalloc(struct device *dev,
624 size_t n, size_t size, gfp_t flags)
625{
626 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
627}
607extern void devm_kfree(struct device *dev, void *p); 628extern void devm_kfree(struct device *dev, void *p);
608 629
609void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); 630void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
@@ -623,9 +644,11 @@ struct device_dma_parameters {
623 unsigned long segment_boundary_mask; 644 unsigned long segment_boundary_mask;
624}; 645};
625 646
647struct acpi_device;
648
626struct acpi_dev_node { 649struct acpi_dev_node {
627#ifdef CONFIG_ACPI 650#ifdef CONFIG_ACPI
628 void *handle; 651 struct acpi_device *companion;
629#endif 652#endif
630}; 653};
631 654
@@ -769,14 +792,6 @@ static inline struct device *kobj_to_dev(struct kobject *kobj)
769 return container_of(kobj, struct device, kobj); 792 return container_of(kobj, struct device, kobj);
770} 793}
771 794
772#ifdef CONFIG_ACPI
773#define ACPI_HANDLE(dev) ((dev)->acpi_node.handle)
774#define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_)
775#else
776#define ACPI_HANDLE(dev) (NULL)
777#define ACPI_HANDLE_SET(dev, _handle_) do { } while (0)
778#endif
779
780/* Get the wakeup routines, which depend on struct device */ 795/* Get the wakeup routines, which depend on struct device */
781#include <linux/pm_wakeup.h> 796#include <linux/pm_wakeup.h>
782 797
@@ -1149,16 +1164,15 @@ do { \
1149#endif 1164#endif
1150 1165
1151/* 1166/*
1152 * dev_WARN*() acts like dev_printk(), but with the key difference 1167 * dev_WARN*() acts like dev_printk(), but with the key difference of
1153 * of using a WARN/WARN_ON to get the message out, including the 1168 * using WARN/WARN_ONCE to include file/line information and a backtrace.
1154 * file/line information and a backtrace.
1155 */ 1169 */
1156#define dev_WARN(dev, format, arg...) \ 1170#define dev_WARN(dev, format, arg...) \
1157 WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg); 1171 WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
1158 1172
1159#define dev_WARN_ONCE(dev, condition, format, arg...) \ 1173#define dev_WARN_ONCE(dev, condition, format, arg...) \
1160 WARN_ONCE(condition, "Device %s\n" format, \ 1174 WARN_ONCE(condition, "%s %s: " format, \
1161 dev_driver_string(dev), ## arg) 1175 dev_driver_string(dev), dev_name(dev), ## arg)
1162 1176
1163/* Create alias, so I can be autoloaded. */ 1177/* Create alias, so I can be autoloaded. */
1164#define MODULE_ALIAS_CHARDEV(major,minor) \ 1178#define MODULE_ALIAS_CHARDEV(major,minor) \
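dev_WARN() and dev_WARN_ONCE() now print the device name next to the driver string instead of the old "Device: ..." prefix. A hedged usage sketch; the ring-overflow condition is purely illustrative:

#include <linux/device.h>

static void foo_check_ring(struct device *dev, int id, bool overflowed)
{
    /* emits "<driver> <dev_name>: ring N overflowed" plus a backtrace, once */
    dev_WARN_ONCE(dev, overflowed, "ring %d overflowed\n", id);
}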
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3a8d0a2af607..fd4aee29ad10 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,30 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
97} 97}
98#endif 98#endif
99 99
100/*
101 * Set both the DMA mask and the coherent DMA mask to the same thing.
102 * Note that we don't check the return value from dma_set_coherent_mask()
103 * as the DMA API guarantees that the coherent DMA mask can be set to
104 * the same or smaller than the streaming DMA mask.
105 */
106static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
107{
108 int rc = dma_set_mask(dev, mask);
109 if (rc == 0)
110 dma_set_coherent_mask(dev, mask);
111 return rc;
112}
113
114/*
115 * Similar to the above, except it deals with the case where the device
116 * does not have dev->dma_mask appropriately setup.
117 */
118static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
119{
120 dev->dma_mask = &dev->coherent_dma_mask;
121 return dma_set_mask_and_coherent(dev, mask);
122}
123
100extern u64 dma_get_required_mask(struct device *dev); 124extern u64 dma_get_required_mask(struct device *dev);
101 125
102static inline unsigned int dma_get_max_seg_size(struct device *dev) 126static inline unsigned int dma_get_max_seg_size(struct device *dev)
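dma_set_mask_and_coherent() folds the usual dma_set_mask()/dma_set_coherent_mask() pair into one call, and dma_coerce_mask_and_coherent() additionally points dev->dma_mask at the coherent mask for devices that never had one set up. A probe-time sketch; the 48-bit first choice and the 32-bit fallback are illustrative:

#include <linux/dma-mapping.h>

static int foo_dma_init(struct device *dev)
{
    int ret;

    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
    if (ret)        /* fall back to 32-bit addressing */
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    return ret;
}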
@@ -129,6 +153,13 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
129 return -EIO; 153 return -EIO;
130} 154}
131 155
156#ifndef dma_max_pfn
157static inline unsigned long dma_max_pfn(struct device *dev)
158{
159 return *dev->dma_mask >> PAGE_SHIFT;
160}
161#endif
162
132static inline void *dma_zalloc_coherent(struct device *dev, size_t size, 163static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
133 dma_addr_t *dma_handle, gfp_t flag) 164 dma_addr_t *dma_handle, gfp_t flag)
134{ 165{
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..41cf0c399288 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
45 45
46/** 46/**
47 * enum dma_status - DMA transaction status 47 * enum dma_status - DMA transaction status
48 * @DMA_SUCCESS: transaction completed successfully 48 * @DMA_COMPLETE: transaction completed
49 * @DMA_IN_PROGRESS: transaction not yet processed 49 * @DMA_IN_PROGRESS: transaction not yet processed
50 * @DMA_PAUSED: transaction is paused 50 * @DMA_PAUSED: transaction is paused
51 * @DMA_ERROR: transaction failed 51 * @DMA_ERROR: transaction failed
52 */ 52 */
53enum dma_status { 53enum dma_status {
54 DMA_SUCCESS, 54 DMA_COMPLETE,
55 DMA_IN_PROGRESS, 55 DMA_IN_PROGRESS,
56 DMA_PAUSED, 56 DMA_PAUSED,
57 DMA_ERROR, 57 DMA_ERROR,
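DMA_SUCCESS is renamed to DMA_COMPLETE throughout, so status checks need the new enumerator. A hedged sketch of a poll helper; the surrounding channel and cookie management is assumed to exist elsewhere:

#include <linux/dmaengine.h>

static bool foo_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
    enum dma_status status;

    status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
    return status == DMA_COMPLETE;      /* was DMA_SUCCESS before this change */
}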
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
171 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client 171 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
172 * acknowledges receipt, i.e. has has a chance to establish any dependency 172 * acknowledges receipt, i.e. has has a chance to establish any dependency
173 * chains 173 * chains
174 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
175 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
176 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
177 * (if not set, do the source dma-unmapping as page)
178 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
179 * (if not set, do the destination dma-unmapping as page)
180 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q 174 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
181 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P 175 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
182 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as 176 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
188enum dma_ctrl_flags { 182enum dma_ctrl_flags {
189 DMA_PREP_INTERRUPT = (1 << 0), 183 DMA_PREP_INTERRUPT = (1 << 0),
190 DMA_CTRL_ACK = (1 << 1), 184 DMA_CTRL_ACK = (1 << 1),
191 DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), 185 DMA_PREP_PQ_DISABLE_P = (1 << 2),
192 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), 186 DMA_PREP_PQ_DISABLE_Q = (1 << 3),
193 DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), 187 DMA_PREP_CONTINUE = (1 << 4),
194 DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), 188 DMA_PREP_FENCE = (1 << 5),
195 DMA_PREP_PQ_DISABLE_P = (1 << 6),
196 DMA_PREP_PQ_DISABLE_Q = (1 << 7),
197 DMA_PREP_CONTINUE = (1 << 8),
198 DMA_PREP_FENCE = (1 << 9),
199}; 189};
200 190
201/** 191/**
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
413typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); 403typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
414 404
415typedef void (*dma_async_tx_callback)(void *dma_async_param); 405typedef void (*dma_async_tx_callback)(void *dma_async_param);
406
407struct dmaengine_unmap_data {
408 u8 to_cnt;
409 u8 from_cnt;
410 u8 bidi_cnt;
411 struct device *dev;
412 struct kref kref;
413 size_t len;
414 dma_addr_t addr[0];
415};
416
416/** 417/**
417 * struct dma_async_tx_descriptor - async transaction descriptor 418 * struct dma_async_tx_descriptor - async transaction descriptor
418 * ---dma generic offload fields--- 419 * ---dma generic offload fields---
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
438 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 439 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
439 dma_async_tx_callback callback; 440 dma_async_tx_callback callback;
440 void *callback_param; 441 void *callback_param;
442 struct dmaengine_unmap_data *unmap;
441#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 443#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
442 struct dma_async_tx_descriptor *next; 444 struct dma_async_tx_descriptor *next;
443 struct dma_async_tx_descriptor *parent; 445 struct dma_async_tx_descriptor *parent;
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
445#endif 447#endif
446}; 448};
447 449
450#ifdef CONFIG_DMA_ENGINE
451static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
452 struct dmaengine_unmap_data *unmap)
453{
454 kref_get(&unmap->kref);
455 tx->unmap = unmap;
456}
457
458struct dmaengine_unmap_data *
459dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
460void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
461#else
462static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
463 struct dmaengine_unmap_data *unmap)
464{
465}
466static inline struct dmaengine_unmap_data *
467dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
468{
469 return NULL;
470}
471static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
472{
473}
474#endif
475
476static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
477{
478 if (tx->unmap) {
479 dmaengine_unmap_put(tx->unmap);
480 tx->unmap = NULL;
481 }
482}
483
448#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 484#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
449static inline void txd_lock(struct dma_async_tx_descriptor *txd) 485static inline void txd_lock(struct dma_async_tx_descriptor *txd)
450{ 486{
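With the DMA_COMPL_* unmap flags gone, clients that want automatic unmapping attach a refcounted dmaengine_unmap_data to the descriptor instead. A rough sketch of a single memcpy submission, assuming src and dst were already DMA-mapped against the channel's device and ignoring most error handling:

#include <linux/dmaengine.h>

static dma_cookie_t foo_issue_copy(struct dma_chan *chan,
                                   dma_addr_t dst, dma_addr_t src, size_t len)
{
    struct dma_device *dma_dev = chan->device;
    struct dma_async_tx_descriptor *tx;
    struct dmaengine_unmap_data *unmap;
    dma_cookie_t cookie = -ENOMEM;

    unmap = dmaengine_get_unmap_data(dma_dev->dev, 2, GFP_NOWAIT);
    if (!unmap)
        return cookie;

    unmap->len = len;
    unmap->addr[0] = src;               /* mapped DMA_TO_DEVICE */
    unmap->to_cnt = 1;
    unmap->addr[1] = dst;               /* mapped DMA_FROM_DEVICE */
    unmap->from_cnt = 1;

    tx = dma_dev->device_prep_dma_memcpy(chan, dst, src, len,
                                         DMA_PREP_INTERRUPT);
    if (tx) {
        dma_set_unmap(tx, unmap);       /* descriptor takes its own reference */
        cookie = dmaengine_submit(tx);
    }
    dmaengine_unmap_put(unmap);         /* drop the submitter's reference */
    return cookie;
}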
@@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
979{ 1015{
980 if (last_complete <= last_used) { 1016 if (last_complete <= last_used) {
981 if ((cookie <= last_complete) || (cookie > last_used)) 1017 if ((cookie <= last_complete) || (cookie > last_used))
982 return DMA_SUCCESS; 1018 return DMA_COMPLETE;
983 } else { 1019 } else {
984 if ((cookie <= last_complete) && (cookie > last_used)) 1020 if ((cookie <= last_complete) && (cookie > last_used))
985 return DMA_SUCCESS; 1021 return DMA_COMPLETE;
986 } 1022 }
987 return DMA_IN_PROGRESS; 1023 return DMA_IN_PROGRESS;
988} 1024}
@@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
1013} 1049}
1014static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 1050static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
1015{ 1051{
1016 return DMA_SUCCESS; 1052 return DMA_COMPLETE;
1017} 1053}
1018static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 1054static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1019{ 1055{
1020 return DMA_SUCCESS; 1056 return DMA_COMPLETE;
1021} 1057}
1022static inline void dma_issue_pending_all(void) 1058static inline void dma_issue_pending_all(void)
1023{ 1059{
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index b6eb7a05d58e..f820f0a336c9 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -99,6 +99,7 @@ extern const char * dmi_get_system_info(int field);
99extern const struct dmi_device * dmi_find_device(int type, const char *name, 99extern const struct dmi_device * dmi_find_device(int type, const char *name,
100 const struct dmi_device *from); 100 const struct dmi_device *from);
101extern void dmi_scan_machine(void); 101extern void dmi_scan_machine(void);
102extern void dmi_memdev_walk(void);
102extern void dmi_set_dump_stack_arch_desc(void); 103extern void dmi_set_dump_stack_arch_desc(void);
103extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); 104extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
104extern int dmi_name_in_vendors(const char *str); 105extern int dmi_name_in_vendors(const char *str);
@@ -107,6 +108,7 @@ extern int dmi_available;
107extern int dmi_walk(void (*decode)(const struct dmi_header *, void *), 108extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
108 void *private_data); 109 void *private_data);
109extern bool dmi_match(enum dmi_field f, const char *str); 110extern bool dmi_match(enum dmi_field f, const char *str);
111extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
110 112
111#else 113#else
112 114
@@ -115,6 +117,7 @@ static inline const char * dmi_get_system_info(int field) { return NULL; }
115static inline const struct dmi_device * dmi_find_device(int type, const char *name, 117static inline const struct dmi_device * dmi_find_device(int type, const char *name,
116 const struct dmi_device *from) { return NULL; } 118 const struct dmi_device *from) { return NULL; }
117static inline void dmi_scan_machine(void) { return; } 119static inline void dmi_scan_machine(void) { return; }
120static inline void dmi_memdev_walk(void) { }
118static inline void dmi_set_dump_stack_arch_desc(void) { } 121static inline void dmi_set_dump_stack_arch_desc(void) { }
119static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) 122static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
120{ 123{
@@ -133,6 +136,8 @@ static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
133 void *private_data) { return -1; } 136 void *private_data) { return -1; }
134static inline bool dmi_match(enum dmi_field f, const char *str) 137static inline bool dmi_match(enum dmi_field f, const char *str)
135 { return false; } 138 { return false; }
139static inline void dmi_memdev_name(u16 handle, const char **bank,
140 const char **device) { }
136static inline const struct dmi_system_id * 141static inline const struct dmi_system_id *
137 dmi_first_match(const struct dmi_system_id *list) { return NULL; } 142 dmi_first_match(const struct dmi_system_id *list) { return NULL; }
138 143
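dmi_memdev_walk() and dmi_memdev_name() give EDAC-style consumers a way to resolve an SMBIOS type-17 handle into its bank/device labels, with empty stubs when CONFIG_DMI is off. A hedged sketch; the handle value is assumed to come from elsewhere (e.g. a decoded error record):

#include <linux/dmi.h>
#include <linux/printk.h>

static void foo_report_dimm(u16 smbios_handle)
{
    const char *bank = NULL, *device = NULL;

    dmi_memdev_name(smbios_handle, &bank, &device);
    if (bank && device)
        pr_info("error on DIMM %s / %s\n", bank, device);
}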
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 5c6d7fbaf89e..dbdffe8d4469 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -51,7 +51,7 @@ static inline void opstate_init(void)
51#define EDAC_MC_LABEL_LEN 31 51#define EDAC_MC_LABEL_LEN 31
52 52
53/* Maximum size of the location string */ 53/* Maximum size of the location string */
54#define LOCATION_SIZE 80 54#define LOCATION_SIZE 256
55 55
56/* Defines the maximum number of labels that can be reported */ 56/* Defines the maximum number of labels that can be reported */
57#define EDAC_MAX_LABELS 8 57#define EDAC_MAX_LABELS 8
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5f8f176154f7..11ce6784a196 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -39,6 +39,8 @@
39typedef unsigned long efi_status_t; 39typedef unsigned long efi_status_t;
40typedef u8 efi_bool_t; 40typedef u8 efi_bool_t;
41typedef u16 efi_char16_t; /* UNICODE character */ 41typedef u16 efi_char16_t; /* UNICODE character */
42typedef u64 efi_physical_addr_t;
43typedef void *efi_handle_t;
42 44
43 45
44typedef struct { 46typedef struct {
@@ -96,6 +98,7 @@ typedef struct {
96#define EFI_MEMORY_DESCRIPTOR_VERSION 1 98#define EFI_MEMORY_DESCRIPTOR_VERSION 1
97 99
98#define EFI_PAGE_SHIFT 12 100#define EFI_PAGE_SHIFT 12
101#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
99 102
100typedef struct { 103typedef struct {
101 u32 type; 104 u32 type;
@@ -157,11 +160,13 @@ typedef struct {
157 efi_table_hdr_t hdr; 160 efi_table_hdr_t hdr;
158 void *raise_tpl; 161 void *raise_tpl;
159 void *restore_tpl; 162 void *restore_tpl;
160 void *allocate_pages; 163 efi_status_t (*allocate_pages)(int, int, unsigned long,
161 void *free_pages; 164 efi_physical_addr_t *);
162 void *get_memory_map; 165 efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
163 void *allocate_pool; 166 efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
164 void *free_pool; 167 unsigned long *, u32 *);
168 efi_status_t (*allocate_pool)(int, unsigned long, void **);
169 efi_status_t (*free_pool)(void *);
165 void *create_event; 170 void *create_event;
166 void *set_timer; 171 void *set_timer;
167 void *wait_for_event; 172 void *wait_for_event;
@@ -171,7 +176,7 @@ typedef struct {
171 void *install_protocol_interface; 176 void *install_protocol_interface;
172 void *reinstall_protocol_interface; 177 void *reinstall_protocol_interface;
173 void *uninstall_protocol_interface; 178 void *uninstall_protocol_interface;
174 void *handle_protocol; 179 efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
175 void *__reserved; 180 void *__reserved;
176 void *register_protocol_notify; 181 void *register_protocol_notify;
177 void *locate_handle; 182 void *locate_handle;
@@ -181,7 +186,7 @@ typedef struct {
181 void *start_image; 186 void *start_image;
182 void *exit; 187 void *exit;
183 void *unload_image; 188 void *unload_image;
184 void *exit_boot_services; 189 efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
185 void *get_next_monotonic_count; 190 void *get_next_monotonic_count;
186 void *stall; 191 void *stall;
187 void *set_watchdog_timer; 192 void *set_watchdog_timer;
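Several boot-services slots that used to be bare void pointers become properly typed function pointers, so EFI stub code can call them without casting. A heavily hedged sketch of an allocation through the table; it assumes an environment where boot services may be called directly (the x86 stub still goes through its efi_call wrappers):

#include <linux/efi.h>

static void *foo_alloc_pool(efi_boot_services_t *bt, unsigned long size)
{
    void *buf = NULL;
    efi_status_t status;

    status = bt->allocate_pool(EFI_LOADER_DATA, size, &buf);
    if (status != EFI_SUCCESS)
        return NULL;
    return buf;
}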
@@ -404,6 +409,12 @@ typedef struct {
404 unsigned long table; 409 unsigned long table;
405} efi_config_table_t; 410} efi_config_table_t;
406 411
412typedef struct {
413 efi_guid_t guid;
414 const char *name;
415 unsigned long *ptr;
416} efi_config_table_type_t;
417
407#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) 418#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
408 419
409#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30)) 420#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
@@ -488,10 +499,6 @@ typedef struct {
488 unsigned long unload; 499 unsigned long unload;
489} efi_loaded_image_t; 500} efi_loaded_image_t;
490 501
491typedef struct {
492 u64 revision;
493 void *open_volume;
494} efi_file_io_interface_t;
495 502
496typedef struct { 503typedef struct {
497 u64 size; 504 u64 size;
@@ -504,20 +511,30 @@ typedef struct {
504 efi_char16_t filename[1]; 511 efi_char16_t filename[1];
505} efi_file_info_t; 512} efi_file_info_t;
506 513
507typedef struct { 514typedef struct _efi_file_handle {
508 u64 revision; 515 u64 revision;
509 void *open; 516 efi_status_t (*open)(struct _efi_file_handle *,
510 void *close; 517 struct _efi_file_handle **,
518 efi_char16_t *, u64, u64);
519 efi_status_t (*close)(struct _efi_file_handle *);
511 void *delete; 520 void *delete;
512 void *read; 521 efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
522 void *);
513 void *write; 523 void *write;
514 void *get_position; 524 void *get_position;
515 void *set_position; 525 void *set_position;
516 void *get_info; 526 efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
527 unsigned long *, void *);
517 void *set_info; 528 void *set_info;
518 void *flush; 529 void *flush;
519} efi_file_handle_t; 530} efi_file_handle_t;
520 531
532typedef struct _efi_file_io_interface {
533 u64 revision;
534 int (*open_volume)(struct _efi_file_io_interface *,
535 efi_file_handle_t **);
536} efi_file_io_interface_t;
537
521#define EFI_FILE_MODE_READ 0x0000000000000001 538#define EFI_FILE_MODE_READ 0x0000000000000001
522#define EFI_FILE_MODE_WRITE 0x0000000000000002 539#define EFI_FILE_MODE_WRITE 0x0000000000000002
523#define EFI_FILE_MODE_CREATE 0x8000000000000000 540#define EFI_FILE_MODE_CREATE 0x8000000000000000
@@ -552,6 +569,7 @@ extern struct efi {
552 efi_get_next_high_mono_count_t *get_next_high_mono_count; 569 efi_get_next_high_mono_count_t *get_next_high_mono_count;
553 efi_reset_system_t *reset_system; 570 efi_reset_system_t *reset_system;
554 efi_set_virtual_address_map_t *set_virtual_address_map; 571 efi_set_virtual_address_map_t *set_virtual_address_map;
572 struct efi_memory_map *memmap;
555} efi; 573} efi;
556 574
557static inline int 575static inline int
@@ -587,6 +605,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
587} 605}
588#endif 606#endif
589extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); 607extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
608extern int efi_config_init(efi_config_table_type_t *arch_tables);
590extern u64 efi_get_iobase (void); 609extern u64 efi_get_iobase (void);
591extern u32 efi_mem_type (unsigned long phys_addr); 610extern u32 efi_mem_type (unsigned long phys_addr);
592extern u64 efi_mem_attributes (unsigned long phys_addr); 611extern u64 efi_mem_attributes (unsigned long phys_addr);
@@ -782,6 +801,15 @@ struct efivar_entry {
782 struct efi_variable var; 801 struct efi_variable var;
783 struct list_head list; 802 struct list_head list;
784 struct kobject kobj; 803 struct kobject kobj;
804 bool scanning;
805 bool deleting;
806};
807
808
809struct efi_simple_text_output_protocol {
810 void *reset;
811 efi_status_t (*output_string)(void *, void *);
812 void *test_string;
785}; 813};
786 814
787extern struct list_head efivar_sysfs_list; 815extern struct list_head efivar_sysfs_list;
@@ -840,6 +868,8 @@ void efivar_run_worker(void);
840#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) 868#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
841int efivars_sysfs_init(void); 869int efivars_sysfs_init(void);
842 870
871#define EFIVARS_DATA_SIZE_MAX 1024
872
843#endif /* CONFIG_EFI_VARS */ 873#endif /* CONFIG_EFI_VARS */
844 874
845#endif /* _LINUX_EFI_H */ 875#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 40a3c0e01b2b..67a5fa7830c4 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -39,13 +39,13 @@ extern Elf64_Dyn _DYNAMIC [];
39 39
40/* Optional callbacks to write extra ELF notes. */ 40/* Optional callbacks to write extra ELF notes. */
41struct file; 41struct file;
42struct coredump_params;
42 43
43#ifndef ARCH_HAVE_EXTRA_ELF_NOTES 44#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
44static inline int elf_coredump_extra_notes_size(void) { return 0; } 45static inline int elf_coredump_extra_notes_size(void) { return 0; }
45static inline int elf_coredump_extra_notes_write(struct file *file, 46static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
46 loff_t *foffset) { return 0; }
47#else 47#else
48extern int elf_coredump_extra_notes_size(void); 48extern int elf_coredump_extra_notes_size(void);
49extern int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset); 49extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
50#endif 50#endif
51#endif /* _LINUX_ELF_H */ 51#endif /* _LINUX_ELF_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index cdd3d13efce7..698d51a0eea3 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -6,6 +6,8 @@
6#include <asm/elf.h> 6#include <asm/elf.h>
7#include <uapi/linux/elfcore.h> 7#include <uapi/linux/elfcore.h>
8 8
9struct coredump_params;
10
9static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) 11static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
10{ 12{
11#ifdef ELF_CORE_COPY_REGS 13#ifdef ELF_CORE_COPY_REGS
@@ -63,10 +65,9 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
63 */ 65 */
64extern Elf_Half elf_core_extra_phdrs(void); 66extern Elf_Half elf_core_extra_phdrs(void);
65extern int 67extern int
66elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, 68elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
67 unsigned long limit);
68extern int 69extern int
69elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit); 70elf_core_write_extra_data(struct coredump_params *cprm);
70extern size_t elf_core_extra_data_size(void); 71extern size_t elf_core_extra_data_size(void);
71 72
72#endif /* _LINUX_ELFCORE_H */ 73#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index d8b512496e50..fc4a9aa7dd82 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -28,27 +28,24 @@
28#include <asm/unaligned.h> 28#include <asm/unaligned.h>
29 29
30#ifdef __KERNEL__ 30#ifdef __KERNEL__
31extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 31__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
32extern const struct header_ops eth_header_ops; 32extern const struct header_ops eth_header_ops;
33 33
34extern int eth_header(struct sk_buff *skb, struct net_device *dev, 34int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
35 unsigned short type, 35 const void *daddr, const void *saddr, unsigned len);
36 const void *daddr, const void *saddr, unsigned len); 36int eth_rebuild_header(struct sk_buff *skb);
37extern int eth_rebuild_header(struct sk_buff *skb); 37int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
38extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 38int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
39extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 39 __be16 type);
40extern void eth_header_cache_update(struct hh_cache *hh, 40void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
41 const struct net_device *dev, 41 const unsigned char *haddr);
42 const unsigned char *haddr); 42int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
43extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p); 43void eth_commit_mac_addr_change(struct net_device *dev, void *p);
44extern void eth_commit_mac_addr_change(struct net_device *dev, void *p); 44int eth_mac_addr(struct net_device *dev, void *p);
45extern int eth_mac_addr(struct net_device *dev, void *p); 45int eth_change_mtu(struct net_device *dev, int new_mtu);
46extern int eth_change_mtu(struct net_device *dev, int new_mtu); 46int eth_validate_addr(struct net_device *dev);
47extern int eth_validate_addr(struct net_device *dev); 47
48 48struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
49
50
51extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
52 unsigned int rxqs); 49 unsigned int rxqs);
53#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) 50#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
54#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) 51#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
diff --git a/include/linux/export.h b/include/linux/export.h
index 412cd509effe..3f2793d51899 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,7 +43,7 @@ extern struct module __this_module;
43/* Mark the CRC weak since genksyms apparently decides not to 43/* Mark the CRC weak since genksyms apparently decides not to
44 * generate checksums for some symbols */ 44 * generate checksums for some symbols */
44 * generate checksums for some symbols */ 44 * generate checksums for some symbols */
45#define __CRC_SYMBOL(sym, sec) \ 45#define __CRC_SYMBOL(sym, sec) \
46 extern void *__crc_##sym __attribute__((weak)); \ 46 extern __visible void *__crc_##sym __attribute__((weak)); \
47 static const unsigned long __kcrctab_##sym \ 47 static const unsigned long __kcrctab_##sym \
48 __used \ 48 __used \
49 __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ 49 __attribute__((section("___kcrctab" sec "+" #sym), unused)) \
@@ -59,7 +59,7 @@ extern struct module __this_module;
59 static const char __kstrtab_##sym[] \ 59 static const char __kstrtab_##sym[] \
60 __attribute__((section("__ksymtab_strings"), aligned(1))) \ 60 __attribute__((section("__ksymtab_strings"), aligned(1))) \
61 = VMLINUX_SYMBOL_STR(sym); \ 61 = VMLINUX_SYMBOL_STR(sym); \
62 static const struct kernel_symbol __ksymtab_##sym \ 62 __visible const struct kernel_symbol __ksymtab_##sym \
63 __used \ 63 __used \
64 __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ 64 __attribute__((section("___ksymtab" sec "+" #sym), unused)) \
65 = { (unsigned long)&sym, __kstrtab_##sym } 65 = { (unsigned long)&sym, __kstrtab_##sym }
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index fcb51c88319f..21c59af1150b 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -51,10 +51,10 @@
51enum extcon_cable_name { 51enum extcon_cable_name {
52 EXTCON_USB = 0, 52 EXTCON_USB = 0,
53 EXTCON_USB_HOST, 53 EXTCON_USB_HOST,
54 EXTCON_TA, /* Travel Adaptor */ 54 EXTCON_TA, /* Travel Adaptor */
55 EXTCON_FAST_CHARGER, 55 EXTCON_FAST_CHARGER,
56 EXTCON_SLOW_CHARGER, 56 EXTCON_SLOW_CHARGER,
57 EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */ 57 EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
58 EXTCON_HDMI, 58 EXTCON_HDMI,
59 EXTCON_MHL, 59 EXTCON_MHL,
60 EXTCON_DVI, 60 EXTCON_DVI,
@@ -76,8 +76,8 @@ struct extcon_cable;
76 76
77/** 77/**
78 * struct extcon_dev - An extcon device represents one external connector. 78 * struct extcon_dev - An extcon device represents one external connector.
79 * @name: The name of this extcon device. Parent device name is used 79 * @name: The name of this extcon device. Parent device name is
80 * if NULL. 80 * used if NULL.
81 * @supported_cable: Array of supported cable names ending with NULL. 81 * @supported_cable: Array of supported cable names ending with NULL.
82 * If supported_cable is NULL, cable name related APIs 82 * If supported_cable is NULL, cable name related APIs
83 * are disabled. 83 * are disabled.
@@ -89,21 +89,21 @@ struct extcon_cable;
89 * be attached simultaneously. {0x7, 0} is equivalent to 89 * be attached simultaneously. {0x7, 0} is equivalent to
90 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there 90 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
91 * can be no simultaneous connections. 91 * can be no simultaneous connections.
92 * @print_name: An optional callback to override the method to print the 92 * @print_name: An optional callback to override the method to print the
93 * name of the extcon device. 93 * name of the extcon device.
94 * @print_state: An optional callback to override the method to print the 94 * @print_state: An optional callback to override the method to print the
95 * status of the extcon device. 95 * status of the extcon device.
96 * @dev: Device of this extcon. Do not provide at register-time. 96 * @dev: Device of this extcon.
97 * @state: Attach/detach state of this extcon. Do not provide at 97 * @state: Attach/detach state of this extcon. Do not provide at
98 * register-time 98 * register-time.
99 * @nh: Notifier for the state change events from this extcon 99 * @nh: Notifier for the state change events from this extcon
100 * @entry: To support list of extcon devices so that users can search 100 * @entry: To support list of extcon devices so that users can search
101 * for extcon devices based on the extcon name. 101 * for extcon devices based on the extcon name.
102 * @lock: 102 * @lock:
103 * @max_supported: Internal value to store the number of cables. 103 * @max_supported: Internal value to store the number of cables.
104 * @extcon_dev_type: Device_type struct to provide attribute_groups 104 * @extcon_dev_type: Device_type struct to provide attribute_groups
105 * customized for each extcon device. 105 * customized for each extcon device.
106 * @cables: Sysfs subdirectories. Each represents one cable. 106 * @cables: Sysfs subdirectories. Each represents one cable.
107 * 107 *
108 * In most cases, users only need to provide "User initializing data" of 108 * In most cases, users only need to provide "User initializing data" of
109 * this struct when registering an extcon. In some exceptional cases, 109 * this struct when registering an extcon. In some exceptional cases,
@@ -111,26 +111,27 @@ struct extcon_cable;
111 * are overwritten by register function. 111 * are overwritten by register function.
112 */ 112 */
113struct extcon_dev { 113struct extcon_dev {
114 /* --- Optional user initializing data --- */ 114 /* Optional user initializing data */
115 const char *name; 115 const char *name;
116 const char **supported_cable; 116 const char **supported_cable;
117 const u32 *mutually_exclusive; 117 const u32 *mutually_exclusive;
118 118
119 /* --- Optional callbacks to override class functions --- */ 119 /* Optional callbacks to override class functions */
120 ssize_t (*print_name)(struct extcon_dev *edev, char *buf); 120 ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
121 ssize_t (*print_state)(struct extcon_dev *edev, char *buf); 121 ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
122 122
123 /* --- Internal data. Please do not set. --- */ 123 /* Internal data. Please do not set. */
124 struct device *dev; 124 struct device dev;
125 u32 state;
126 struct raw_notifier_head nh; 125 struct raw_notifier_head nh;
127 struct list_head entry; 126 struct list_head entry;
128 spinlock_t lock; /* could be called by irq handler */
129 int max_supported; 127 int max_supported;
128 spinlock_t lock; /* could be called by irq handler */
129 u32 state;
130 130
131 /* /sys/class/extcon/.../cable.n/... */ 131 /* /sys/class/extcon/.../cable.n/... */
132 struct device_type extcon_dev_type; 132 struct device_type extcon_dev_type;
133 struct extcon_cable *cables; 133 struct extcon_cable *cables;
134
134 /* /sys/class/extcon/.../mutually_exclusive/... */ 135 /* /sys/class/extcon/.../mutually_exclusive/... */
135 struct attribute_group attr_g_muex; 136 struct attribute_group attr_g_muex;
136 struct attribute **attrs_muex; 137 struct attribute **attrs_muex;
@@ -138,13 +139,13 @@ struct extcon_dev {
138}; 139};
139 140
140/** 141/**
141 * struct extcon_cable - An internal data for each cable of extcon device. 142 * struct extcon_cable - An internal data for each cable of extcon device.
142 * @edev: The extcon device 143 * @edev: The extcon device
143 * @cable_index: Index of this cable in the edev 144 * @cable_index: Index of this cable in the edev
144 * @attr_g: Attribute group for the cable 145 * @attr_g: Attribute group for the cable
145 * @attr_name: "name" sysfs entry 146 * @attr_name: "name" sysfs entry
146 * @attr_state: "state" sysfs entry 147 * @attr_state: "state" sysfs entry
147 * @attrs: Array pointing to attr_name and attr_state for attr_g 148 * @attrs: Array pointing to attr_name and attr_state for attr_g
148 */ 149 */
149struct extcon_cable { 150struct extcon_cable {
150 struct extcon_dev *edev; 151 struct extcon_dev *edev;
@@ -159,11 +160,13 @@ struct extcon_cable {
159 160
160/** 161/**
161 * struct extcon_specific_cable_nb - An internal data for 162 * struct extcon_specific_cable_nb - An internal data for
162 * extcon_register_interest(). 163 * extcon_register_interest().
163 * @internal_nb: a notifier block bridging extcon notifier and cable notifier. 164 * @internal_nb: A notifier block bridging extcon notifier
164 * @user_nb: user provided notifier block for events from a specific cable. 165 * and cable notifier.
166 * @user_nb: user provided notifier block for events from
167 * a specific cable.
165 * @cable_index: the target cable. 168 * @cable_index: the target cable.
166 * @edev: the target extcon device. 169 * @edev: the target extcon device.
167 * @previous_value: the saved previous event value. 170 * @previous_value: the saved previous event value.
168 */ 171 */
169struct extcon_specific_cable_nb { 172struct extcon_specific_cable_nb {
@@ -180,7 +183,7 @@ struct extcon_specific_cable_nb {
180 * Following APIs are for notifiers or configurations. 183 * Following APIs are for notifiers or configurations.
181 * Notifiers are the external port and connection devices. 184 * Notifiers are the external port and connection devices.
182 */ 185 */
183extern int extcon_dev_register(struct extcon_dev *edev, struct device *dev); 186extern int extcon_dev_register(struct extcon_dev *edev);
184extern void extcon_dev_unregister(struct extcon_dev *edev); 187extern void extcon_dev_unregister(struct extcon_dev *edev);
185extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); 188extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
186 189
@@ -238,8 +241,7 @@ extern int extcon_register_notifier(struct extcon_dev *edev,
238extern int extcon_unregister_notifier(struct extcon_dev *edev, 241extern int extcon_unregister_notifier(struct extcon_dev *edev,
239 struct notifier_block *nb); 242 struct notifier_block *nb);
240#else /* CONFIG_EXTCON */ 243#else /* CONFIG_EXTCON */
241static inline int extcon_dev_register(struct extcon_dev *edev, 244static inline int extcon_dev_register(struct extcon_dev *edev)
242 struct device *dev)
243{ 245{
244 return 0; 246 return 0;
245} 247}
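extcon_dev_register() loses its struct device argument and struct extcon_dev now embeds its own struct device, so drivers fill in the user fields and register directly. A hedged sketch; the connector name and cable list are illustrative:

#include <linux/extcon.h>
#include <linux/init.h>

static const char *foo_cables[] = { "USB", "USB-Host", NULL };

static struct extcon_dev foo_edev = {
    .name            = "foo-connector",
    .supported_cable = foo_cables,
};

static int __init foo_extcon_init(void)
{
    return extcon_dev_register(&foo_edev);
}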
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 20e9eef25d4c..9ca958c4e94c 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -20,10 +20,10 @@
20 20
21/** 21/**
22 * struct adc_jack_cond - condition to use an extcon state 22 * struct adc_jack_cond - condition to use an extcon state
23 * @state - the corresponding extcon state (if 0, this struct denotes 23 * @state: the corresponding extcon state (if 0, this struct
24 * the last adc_jack_cond element among the array) 24 * denotes the last adc_jack_cond element among the array)
25 * @min_adc - min adc value for this condition 25 * @min_adc: min adc value for this condition
26 * @max_adc - max adc value for this condition 26 * @max_adc: max adc value for this condition
27 * 27 *
28 * For example, if { .state = 0x3, .min_adc = 100, .max_adc = 200}, it means 28 * For example, if { .state = 0x3, .min_adc = 100, .max_adc = 200}, it means
29 * that if ADC value is between (inclusive) 100 and 200, then the cable 0 and 29 * that if ADC value is between (inclusive) 100 and 200, then the cable 0 and
@@ -33,34 +33,34 @@
33 * because when no adc_jack_cond is met, state = 0 is automatically chosen. 33 * because when no adc_jack_cond is met, state = 0 is automatically chosen.
34 */ 34 */
35struct adc_jack_cond { 35struct adc_jack_cond {
36 u32 state; /* extcon state value. 0 if invalid */ 36 u32 state; /* extcon state value. 0 if invalid */
37 u32 min_adc; 37 u32 min_adc;
38 u32 max_adc; 38 u32 max_adc;
39}; 39};
40 40
41/** 41/**
42 * struct adc_jack_pdata - platform data for adc jack device. 42 * struct adc_jack_pdata - platform data for adc jack device.
43 * @name - name of the extcon device. If null, "adc-jack" is used. 43 * @name: name of the extcon device. If null, "adc-jack" is used.
44 * @consumer_channel - Unique name to identify the channel on the consumer 44 * @consumer_channel: Unique name to identify the channel on the consumer
45 * side. This typically describes the channels used within 45 * side. This typically describes the channels used within
46 * the consumer. E.g. 'battery_voltage' 46 * the consumer. E.g. 'battery_voltage'
47 * @cable_names - array of cable names ending with null. 47 * @cable_names: array of cable names ending with null.
48 * @adc_conditions - array of struct adc_jack_cond conditions ending 48 * @adc_conditions: array of struct adc_jack_cond conditions ending
49 * with .state = 0 entry. This describes how to decode 49 * with .state = 0 entry. This describes how to decode
50 * adc values into extcon state. 50 * adc values into extcon state.
51 * @irq_flags - irq flags used for the @irq 51 * @irq_flags: irq flags used for the @irq
52 * @handling_delay_ms - in some devices, we need to read ADC value some 52 * @handling_delay_ms: in some devices, we need to read ADC value some
53 * milli-seconds after the interrupt occurs. You may 53 * milli-seconds after the interrupt occurs. You may
54 * describe such delays with @handling_delay_ms, which 54 * describe such delays with @handling_delay_ms, which
55 * is rounded-off by jiffies. 55 * is rounded-off by jiffies.
56 */ 56 */
57struct adc_jack_pdata { 57struct adc_jack_pdata {
58 const char *name; 58 const char *name;
59 const char *consumer_channel; 59 const char *consumer_channel;
60 /* 60
61 * The last entry should be NULL 61 /* The last entry should be NULL */
62 */
63 const char **cable_names; 62 const char **cable_names;
63
64 /* The last entry's state should be 0 */ 64 /* The last entry's state should be 0 */
65 struct adc_jack_cond *adc_conditions; 65 struct adc_jack_cond *adc_conditions;
66 66
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 2d8307f7d67d..4195810f87fe 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -25,14 +25,17 @@
25 25
26/** 26/**
27 * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device. 27 * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device.
28 * @name The name of this GPIO extcon device. 28 * @name: The name of this GPIO extcon device.
29 * @gpio Corresponding GPIO. 29 * @gpio: Corresponding GPIO.
30 * @debounce Debounce time for GPIO IRQ in ms. 30 * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0
31 * @irq_flags IRQ Flags (e.g., IRQF_TRIGGER_LOW). 31 * If true, low state of gpio means active.
32 * @state_on print_state is overriden with state_on if attached. If Null, 32 * If false, high state of gpio means active.
33 * default method of extcon class is used. 33 * @debounce: Debounce time for GPIO IRQ in ms.
34 * @state_off print_state is overriden with state_on if detached. If Null, 34 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
35 * default method of extcon class is used. 35 * @state_on: print_state is overridden with state_on if attached.
36 * If NULL, default method of extcon class is used.
37 * @state_off: print_state is overridden with state_off if detached.
38 * If NULL, default method of extcon class is used.
36 * 39 *
37 * Note that in order for state_on or state_off to be valid, both state_on 40 * Note that in order for state_on or state_off to be valid, both state_on
38 * and state_off should be not NULL. If at least one of them is NULL, 41 * and state_off should be not NULL. If at least one of them is NULL,
@@ -41,6 +44,7 @@
41struct gpio_extcon_platform_data { 44struct gpio_extcon_platform_data {
42 const char *name; 45 const char *name;
43 unsigned gpio; 46 unsigned gpio;
47 bool gpio_active_low;
44 unsigned long debounce; 48 unsigned long debounce;
45 unsigned long irq_flags; 49 unsigned long irq_flags;
46 50
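The new gpio_active_low field lets board code describe active-low detection lines without inverting the state in the driver. A hedged platform-data sketch; the GPIO number, name and debounce value are illustrative:

#include <linux/extcon/extcon-gpio.h>
#include <linux/interrupt.h>

static struct gpio_extcon_platform_data foo_headset_pdata = {
    .name            = "headset",
    .gpio            = 42,
    .gpio_active_low = true,            /* line is pulled low while inserted */
    .debounce        = 20,              /* ms */
    .irq_flags       = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
};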
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ffac70aab3e9..70c4836e4a9f 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -792,4 +792,16 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
792 const struct fb_videomode *default_mode, 792 const struct fb_videomode *default_mode,
793 unsigned int default_bpp); 793 unsigned int default_bpp);
794 794
795/* Convenience logging macros */
796#define fb_err(fb_info, fmt, ...) \
797 pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
798#define fb_notice(info, fmt, ...) \
799 pr_notice("fb%d: " fmt, (info)->node, ##__VA_ARGS__)
800#define fb_warn(fb_info, fmt, ...) \
801 pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
802#define fb_info(fb_info, fmt, ...) \
803 pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
804#define fb_dbg(fb_info, fmt, ...) \
805 pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
806
795#endif /* _LINUX_FB_H */ 807#endif /* _LINUX_FB_H */
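The new fb_* helpers prefix messages with the fbdev index, replacing ad-hoc printk calls in drivers. A hedged sketch of their use from a hypothetical check_var implementation:

#include <linux/errno.h>
#include <linux/fb.h>

static int foo_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
    if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
        fb_err(info, "unsupported depth %u\n", var->bits_per_pixel);
        return -EINVAL;
    }
    fb_dbg(info, "using %ux%u-%u\n", var->xres, var->yres,
           var->bits_per_pixel);
    return 0;
}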
diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h
index e460ef831984..5009fa16b5d8 100644
--- a/include/linux/fcdevice.h
+++ b/include/linux/fcdevice.h
@@ -27,7 +27,7 @@
27#include <linux/if_fc.h> 27#include <linux/if_fc.h>
28 28
29#ifdef __KERNEL__ 29#ifdef __KERNEL__
30extern struct net_device *alloc_fcdev(int sizeof_priv); 30struct net_device *alloc_fcdev(int sizeof_priv);
31#endif 31#endif
32 32
33#endif /* _LINUX_FCDEVICE_H */ 33#endif /* _LINUX_FCDEVICE_H */
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 155bafd9e886..9a79f0106da1 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -25,10 +25,9 @@
25#include <linux/if_fddi.h> 25#include <linux/if_fddi.h>
26 26
27#ifdef __KERNEL__ 27#ifdef __KERNEL__
28extern __be16 fddi_type_trans(struct sk_buff *skb, 28__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
29 struct net_device *dev); 29int fddi_change_mtu(struct net_device *dev, int new_mtu);
30extern int fddi_change_mtu(struct net_device *dev, int new_mtu); 30struct net_device *alloc_fddidev(int sizeof_priv);
31extern struct net_device *alloc_fddidev(int sizeof_priv);
32#endif 31#endif
33 32
34#endif /* _LINUX_FDDIDEVICE_H */ 33#endif /* _LINUX_FDDIDEVICE_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3f40547ba191..121f11f001c0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -623,10 +623,13 @@ static inline int inode_unhashed(struct inode *inode)
623 * 0: the object of the current VFS operation 623 * 0: the object of the current VFS operation
624 * 1: parent 624 * 1: parent
625 * 2: child/target 625 * 2: child/target
626 * 3: quota file 626 * 3: xattr
627 * 4: second non-directory
628 * The last is for certain operations (such as rename) which lock two
629 * non-directories at once.
627 * 630 *
628 * The locking order between these classes is 631 * The locking order between these classes is
629 * parent -> child -> normal -> xattr -> quota 632 * parent -> child -> normal -> xattr -> second non-directory
630 */ 633 */
631enum inode_i_mutex_lock_class 634enum inode_i_mutex_lock_class
632{ 635{
@@ -634,9 +637,12 @@ enum inode_i_mutex_lock_class
634 I_MUTEX_PARENT, 637 I_MUTEX_PARENT,
635 I_MUTEX_CHILD, 638 I_MUTEX_CHILD,
636 I_MUTEX_XATTR, 639 I_MUTEX_XATTR,
637 I_MUTEX_QUOTA 640 I_MUTEX_NONDIR2
638}; 641};
639 642
643void lock_two_nondirectories(struct inode *, struct inode*);
644void unlock_two_nondirectories(struct inode *, struct inode*);
645
640/* 646/*
641 * NOTE: in a 32bit arch with a preemptable kernel and 647 * NOTE: in a 32bit arch with a preemptable kernel and
642 * an UP compile the i_size_read/write must be atomic 648 * an UP compile the i_size_read/write must be atomic
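The quota lockdep class gives way to I_MUTEX_NONDIR2, and lock_two_nondirectories()/unlock_two_nondirectories() encapsulate taking both i_mutexes of two non-directory inodes in a stable order. A hedged sketch of the intended pattern; the operation body is elided:

#include <linux/fs.h>

static int foo_op_on_two_files(struct inode *a, struct inode *b)
{
    int err = 0;

    lock_two_nondirectories(a, b);      /* ordered, deadlock-free */
    /* ... operate with both inodes locked ... */
    unlock_two_nondirectories(a, b);
    return err;
}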
@@ -764,12 +770,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
764#define FILE_MNT_WRITE_RELEASED 2 770#define FILE_MNT_WRITE_RELEASED 2
765 771
766struct file { 772struct file {
767 /*
768 * fu_list becomes invalid after file_free is called and queued via
769 * fu_rcuhead for RCU freeing
770 */
771 union { 773 union {
772 struct list_head fu_list;
773 struct llist_node fu_llist; 774 struct llist_node fu_llist;
774 struct rcu_head fu_rcuhead; 775 struct rcu_head fu_rcuhead;
775 } f_u; 776 } f_u;
@@ -783,9 +784,6 @@ struct file {
783 * Must not be taken from IRQ context. 784 * Must not be taken from IRQ context.
784 */ 785 */
785 spinlock_t f_lock; 786 spinlock_t f_lock;
786#ifdef CONFIG_SMP
787 int f_sb_list_cpu;
788#endif
789 atomic_long_t f_count; 787 atomic_long_t f_count;
790 unsigned int f_flags; 788 unsigned int f_flags;
791 fmode_t f_mode; 789 fmode_t f_mode;
@@ -882,6 +880,7 @@ static inline int file_check_writeable(struct file *filp)
882 880
883#define FL_POSIX 1 881#define FL_POSIX 1
884#define FL_FLOCK 2 882#define FL_FLOCK 2
883#define FL_DELEG 4 /* NFSv4 delegation */
885#define FL_ACCESS 8 /* not trying to lock, just looking */ 884#define FL_ACCESS 8 /* not trying to lock, just looking */
886#define FL_EXISTS 16 /* when unlocking, test for existence */ 885#define FL_EXISTS 16 /* when unlocking, test for existence */
887#define FL_LEASE 32 /* lease held on this file */ 886#define FL_LEASE 32 /* lease held on this file */
@@ -1023,7 +1022,7 @@ extern int vfs_test_lock(struct file *, struct file_lock *);
1023extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); 1022extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
1024extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); 1023extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
1025extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); 1024extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
1026extern int __break_lease(struct inode *inode, unsigned int flags); 1025extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
1027extern void lease_get_mtime(struct inode *, struct timespec *time); 1026extern void lease_get_mtime(struct inode *, struct timespec *time);
1028extern int generic_setlease(struct file *, long, struct file_lock **); 1027extern int generic_setlease(struct file *, long, struct file_lock **);
1029extern int vfs_setlease(struct file *, long, struct file_lock **); 1028extern int vfs_setlease(struct file *, long, struct file_lock **);
@@ -1132,7 +1131,7 @@ static inline int flock_lock_file_wait(struct file *filp,
1132 return -ENOLCK; 1131 return -ENOLCK;
1133} 1132}
1134 1133
1135static inline int __break_lease(struct inode *inode, unsigned int mode) 1134static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1136{ 1135{
1137 return 0; 1136 return 0;
1138} 1137}
@@ -1264,11 +1263,6 @@ struct super_block {
1264 1263
1265 struct list_head s_inodes; /* all inodes */ 1264 struct list_head s_inodes; /* all inodes */
1266 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ 1265 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1267#ifdef CONFIG_SMP
1268 struct list_head __percpu *s_files;
1269#else
1270 struct list_head s_files;
1271#endif
1272 struct list_head s_mounts; /* list of mounts; _not_ for fs use */ 1266 struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1273 struct block_device *s_bdev; 1267 struct block_device *s_bdev;
1274 struct backing_dev_info *s_bdi; 1268 struct backing_dev_info *s_bdi;
@@ -1330,6 +1324,7 @@ struct super_block {
1330 */ 1324 */
1331 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1325 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
1332 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1326 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
1327 struct rcu_head rcu;
1333}; 1328};
1334 1329
1335extern struct timespec current_fs_time(struct super_block *sb); 1330extern struct timespec current_fs_time(struct super_block *sb);
@@ -1458,10 +1453,10 @@ extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
1458extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); 1453extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
1459extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); 1454extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
1460extern int vfs_symlink(struct inode *, struct dentry *, const char *); 1455extern int vfs_symlink(struct inode *, struct dentry *, const char *);
1461extern int vfs_link(struct dentry *, struct inode *, struct dentry *); 1456extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
1462extern int vfs_rmdir(struct inode *, struct dentry *); 1457extern int vfs_rmdir(struct inode *, struct dentry *);
1463extern int vfs_unlink(struct inode *, struct dentry *); 1458extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
1464extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); 1459extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **);
1465 1460
1466/* 1461/*
1467 * VFS dentry helper functions. 1462 * VFS dentry helper functions.
@@ -1875,6 +1870,17 @@ extern struct dentry *mount_pseudo(struct file_system_type *, char *,
1875 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) 1870 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
1876#define fops_put(fops) \ 1871#define fops_put(fops) \
1877 do { if (fops) module_put((fops)->owner); } while(0) 1872 do { if (fops) module_put((fops)->owner); } while(0)
1873/*
1874 * This one is to be used *ONLY* from ->open() instances.
1875 * fops must be non-NULL, pinned down *and* module dependencies
1876 * should be sufficient to pin the caller down as well.
1877 */
1878#define replace_fops(f, fops) \
1879 do { \
1880 struct file *__file = (f); \
1881 fops_put(__file->f_op); \
1882 BUG_ON(!(__file->f_op = (fops))); \
1883 } while(0)
1878 1884
1879extern int register_filesystem(struct file_system_type *); 1885extern int register_filesystem(struct file_system_type *);
1880extern int unregister_filesystem(struct file_system_type *); 1886extern int unregister_filesystem(struct file_system_type *);
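replace_fops() is meant for ->open() instances that hand the struct file over to a different, already-pinned operations table. A hedged sketch; FOO_RAW_MINOR and foo_raw_fops are hypothetical stand-ins for a driver's own table:

#include <linux/fs.h>

#define FOO_RAW_MINOR   1                       /* hypothetical */

static const struct file_operations foo_raw_fops;   /* hypothetical, module-local */

static int foo_open(struct inode *inode, struct file *file)
{
    if (iminor(inode) == FOO_RAW_MINOR) {
        /* drops the old f_op reference and installs the new table */
        replace_fops(file, &foo_raw_fops);
        if (file->f_op->open)
            return file->f_op->open(inode, file);
    }
    return 0;
}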
@@ -1899,6 +1905,9 @@ extern bool fs_fully_visible(struct file_system_type *);
1899 1905
1900extern int current_umask(void); 1906extern int current_umask(void);
1901 1907
1908extern void ihold(struct inode * inode);
1909extern void iput(struct inode *);
1910
1902/* /sys/fs */ 1911/* /sys/fs */
1903extern struct kobject *fs_kobj; 1912extern struct kobject *fs_kobj;
1904 1913
@@ -1955,9 +1964,39 @@ static inline int locks_verify_truncate(struct inode *inode,
1955static inline int break_lease(struct inode *inode, unsigned int mode) 1964static inline int break_lease(struct inode *inode, unsigned int mode)
1956{ 1965{
1957 if (inode->i_flock) 1966 if (inode->i_flock)
1958 return __break_lease(inode, mode); 1967 return __break_lease(inode, mode, FL_LEASE);
1959 return 0; 1968 return 0;
1960} 1969}
1970
1971static inline int break_deleg(struct inode *inode, unsigned int mode)
1972{
1973 if (inode->i_flock)
1974 return __break_lease(inode, mode, FL_DELEG);
1975 return 0;
1976}
1977
1978static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
1979{
1980 int ret;
1981
1982 ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
1983 if (ret == -EWOULDBLOCK && delegated_inode) {
1984 *delegated_inode = inode;
1985 ihold(inode);
1986 }
1987 return ret;
1988}
1989
1990static inline int break_deleg_wait(struct inode **delegated_inode)
1991{
1992 int ret;
1993
1994 ret = break_deleg(*delegated_inode, O_WRONLY);
1995 iput(*delegated_inode);
1996 *delegated_inode = NULL;
1997 return ret;
1998}
1999
1961#else /* !CONFIG_FILE_LOCKING */ 2000#else /* !CONFIG_FILE_LOCKING */
1962static inline int locks_mandatory_locked(struct inode *inode) 2001static inline int locks_mandatory_locked(struct inode *inode)
1963{ 2002{
@@ -1997,6 +2036,22 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1997 return 0; 2036 return 0;
1998} 2037}
1999 2038
2039static inline int break_deleg(struct inode *inode, unsigned int mode)
2040{
2041 return 0;
2042}
2043
2044static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
2045{
2046 return 0;
2047}
2048
2049static inline int break_deleg_wait(struct inode **delegated_inode)
2050{
2051 BUG();
2052 return 0;
2053}
2054
2000#endif /* CONFIG_FILE_LOCKING */ 2055#endif /* CONFIG_FILE_LOCKING */
2001 2056
2002/* fs/open.c */ 2057/* fs/open.c */
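break_deleg(), try_break_deleg() and break_deleg_wait() pair with the new struct inode ** argument to vfs_unlink()/vfs_link()/vfs_rename()/notify_change(): the VFS records the delegated inode, and the caller waits for the break and retries. A hedged sketch of that retry loop; parent-directory locking and dentry lookup are elided:

#include <linux/fs.h>

static int foo_unlink(struct inode *dir, struct dentry *dentry)
{
    struct inode *delegated = NULL;
    int err;

retry:
    /* dir->i_mutex must be held by the caller; elided here */
    err = vfs_unlink(dir, dentry, &delegated);
    if (delegated) {
        /* a delegation had to be broken: wait for it, then retry */
        err = break_deleg_wait(&delegated);
        if (!err)
            goto retry;
    }
    return err;
}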
@@ -2223,7 +2278,7 @@ extern void emergency_remount(void);
2223#ifdef CONFIG_BLOCK 2278#ifdef CONFIG_BLOCK
2224extern sector_t bmap(struct inode *, sector_t); 2279extern sector_t bmap(struct inode *, sector_t);
2225#endif 2280#endif
2226extern int notify_change(struct dentry *, struct iattr *); 2281extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2227extern int inode_permission(struct inode *, int); 2282extern int inode_permission(struct inode *, int);
2228extern int generic_permission(struct inode *, int); 2283extern int generic_permission(struct inode *, int);
2229 2284
@@ -2292,6 +2347,11 @@ static inline void allow_write_access(struct file *file)
2292 if (file) 2347 if (file)
2293 atomic_inc(&file_inode(file)->i_writecount); 2348 atomic_inc(&file_inode(file)->i_writecount);
2294} 2349}
2350static inline bool inode_is_open_for_write(const struct inode *inode)
2351{
2352 return atomic_read(&inode->i_writecount) > 0;
2353}
2354
2295#ifdef CONFIG_IMA 2355#ifdef CONFIG_IMA
2296static inline void i_readcount_dec(struct inode *inode) 2356static inline void i_readcount_dec(struct inode *inode)
2297{ 2357{
@@ -2332,8 +2392,6 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
2332extern int inode_init_always(struct super_block *, struct inode *); 2392extern int inode_init_always(struct super_block *, struct inode *);
2333extern void inode_init_once(struct inode *); 2393extern void inode_init_once(struct inode *);
2334extern void address_space_init_once(struct address_space *mapping); 2394extern void address_space_init_once(struct address_space *mapping);
2335extern void ihold(struct inode * inode);
2336extern void iput(struct inode *);
2337extern struct inode * igrab(struct inode *); 2395extern struct inode * igrab(struct inode *);
2338extern ino_t iunique(struct super_block *, ino_t); 2396extern ino_t iunique(struct super_block *, ino_t);
2339extern int inode_needs_sync(struct inode *inode); 2397extern int inode_needs_sync(struct inode *inode);
@@ -2502,8 +2560,10 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
2502 int nofs); 2560 int nofs);
2503extern int page_symlink(struct inode *inode, const char *symname, int len); 2561extern int page_symlink(struct inode *inode, const char *symname, int len);
2504extern const struct inode_operations page_symlink_inode_operations; 2562extern const struct inode_operations page_symlink_inode_operations;
2563extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
2505extern int generic_readlink(struct dentry *, char __user *, int); 2564extern int generic_readlink(struct dentry *, char __user *, int);
2506extern void generic_fillattr(struct inode *, struct kstat *); 2565extern void generic_fillattr(struct inode *, struct kstat *);
2566int vfs_getattr_nosec(struct path *path, struct kstat *stat);
2507extern int vfs_getattr(struct path *, struct kstat *); 2567extern int vfs_getattr(struct path *, struct kstat *);
2508void __inode_add_bytes(struct inode *inode, loff_t bytes); 2568void __inode_add_bytes(struct inode *inode, loff_t bytes);
2509void inode_add_bytes(struct inode *inode, loff_t bytes); 2569void inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -2562,6 +2622,9 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
2562extern int simple_write_end(struct file *file, struct address_space *mapping, 2622extern int simple_write_end(struct file *file, struct address_space *mapping,
2563 loff_t pos, unsigned len, unsigned copied, 2623 loff_t pos, unsigned len, unsigned copied,
2564 struct page *page, void *fsdata); 2624 struct page *page, void *fsdata);
2625extern int always_delete_dentry(const struct dentry *);
2626extern struct inode *alloc_anon_inode(struct super_block *);
2627extern const struct dentry_operations simple_dentry_operations;
2565 2628
2566extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); 2629extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
2567extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); 2630extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
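The fs.h hunks above add a delegation pointer to notify_change() together with break_deleg_wait()/try_break_deleg(). A minimal sketch of the retry pattern a caller might follow (example_setattr and its locking context are illustrative, not part of this patch):

static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct inode *delegated_inode = NULL;
	int error;

retry:
	mutex_lock(&inode->i_mutex);
	error = notify_change(dentry, attr, &delegated_inode);
	mutex_unlock(&inode->i_mutex);
	if (delegated_inode) {
		/* an outstanding delegation was broken; wait and retry */
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry;
	}
	return error;
}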
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 7823e9ef995e..771484993ca7 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -308,36 +308,6 @@ struct fscache_cache_ops {
308 void (*dissociate_pages)(struct fscache_cache *cache); 308 void (*dissociate_pages)(struct fscache_cache *cache);
309}; 309};
310 310
311/*
312 * data file or index object cookie
313 * - a file will only appear in one cache
314 * - a request to cache a file may or may not be honoured, subject to
315 * constraints such as disk space
316 * - indices are created on disk just-in-time
317 */
318struct fscache_cookie {
319 atomic_t usage; /* number of users of this cookie */
320 atomic_t n_children; /* number of children of this cookie */
321 atomic_t n_active; /* number of active users of netfs ptrs */
322 spinlock_t lock;
323 spinlock_t stores_lock; /* lock on page store tree */
324 struct hlist_head backing_objects; /* object(s) backing this file/index */
325 const struct fscache_cookie_def *def; /* definition */
326 struct fscache_cookie *parent; /* parent of this entry */
327 void *netfs_data; /* back pointer to netfs */
328 struct radix_tree_root stores; /* pages to be stored on this cookie */
329#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
330#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
331
332 unsigned long flags;
333#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
334#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
335#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
336#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
337#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
338#define FSCACHE_COOKIE_RETIRED 5 /* T if cookie was retired */
339};
340
341extern struct fscache_cookie fscache_fsdef_index; 311extern struct fscache_cookie fscache_fsdef_index;
342 312
343/* 313/*
@@ -400,6 +370,7 @@ struct fscache_object {
400#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */ 370#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
401#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ 371#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
402#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ 372#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
373#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
403 374
404 struct list_head cache_link; /* link in cache->object_list */ 375 struct list_head cache_link; /* link in cache->object_list */
405 struct hlist_node cookie_link; /* link in cookie->backing_objects */ 376 struct hlist_node cookie_link; /* link in cookie->backing_objects */
@@ -511,6 +482,11 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
511 op->end_io_func(page, op->context, error); 482 op->end_io_func(page, op->context, error);
512} 483}
513 484
485static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
486{
487 atomic_inc(&cookie->n_active);
488}
489
514/** 490/**
515 * fscache_use_cookie - Request usage of cookie attached to an object 491 * fscache_use_cookie - Request usage of cookie attached to an object
516 * @object: Object description 492 * @object: Object description
@@ -524,6 +500,16 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
524 return atomic_inc_not_zero(&cookie->n_active) != 0; 500 return atomic_inc_not_zero(&cookie->n_active) != 0;
525} 501}
526 502
503static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
504{
505 return atomic_dec_and_test(&cookie->n_active);
506}
507
508static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
509{
510 wake_up_atomic_t(&cookie->n_active);
511}
512
527/** 513/**
528 * fscache_unuse_cookie - Cease usage of cookie attached to an object 514 * fscache_unuse_cookie - Cease usage of cookie attached to an object
529 * @object: Object description 515 * @object: Object description
@@ -534,8 +520,8 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
534static inline void fscache_unuse_cookie(struct fscache_object *object) 520static inline void fscache_unuse_cookie(struct fscache_object *object)
535{ 521{
536 struct fscache_cookie *cookie = object->cookie; 522 struct fscache_cookie *cookie = object->cookie;
537 if (atomic_dec_and_test(&cookie->n_active)) 523 if (__fscache_unuse_cookie(cookie))
538 wake_up_atomic_t(&cookie->n_active); 524 __fscache_wake_unused_cookie(cookie);
539} 525}
540 526
541/* 527/*
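The new __fscache_use_cookie()/__fscache_unuse_cookie() helpers factor out the n_active accounting behind fscache_use_cookie()/fscache_unuse_cookie(). A rough sketch of how a cache backend might bracket access to a cookie's netfs pointers (example_do_lookup is a made-up name):

static void example_do_lookup(struct fscache_object *object)
{
	/* pin the cookie's netfs pointers; fails if it is being relinquished */
	if (!fscache_use_cookie(object))
		return;

	/* ... safe to use object->cookie->def and ->netfs_data here ... */

	/* drop the pin; wakes waiters when n_active reaches zero */
	fscache_unuse_cookie(object);
}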
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 19b46458e4e8..115bb81912cc 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -167,6 +167,42 @@ struct fscache_netfs {
167}; 167};
168 168
169/* 169/*
170 * data file or index object cookie
171 * - a file will only appear in one cache
172 * - a request to cache a file may or may not be honoured, subject to
173 * constraints such as disk space
174 * - indices are created on disk just-in-time
175 */
176struct fscache_cookie {
177 atomic_t usage; /* number of users of this cookie */
178 atomic_t n_children; /* number of children of this cookie */
179 atomic_t n_active; /* number of active users of netfs ptrs */
180 spinlock_t lock;
181 spinlock_t stores_lock; /* lock on page store tree */
182 struct hlist_head backing_objects; /* object(s) backing this file/index */
183 const struct fscache_cookie_def *def; /* definition */
184 struct fscache_cookie *parent; /* parent of this entry */
185 void *netfs_data; /* back pointer to netfs */
186 struct radix_tree_root stores; /* pages to be stored on this cookie */
187#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
188#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
189
190 unsigned long flags;
191#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
192#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
193#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
194#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
195#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
196#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
197#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
198};
199
200static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
201{
202 return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
203}
204
205/*
170 * slow-path functions for when there is actually caching available, and the 206 * slow-path functions for when there is actually caching available, and the
171 * netfs does actually have a valid token 207 * netfs does actually have a valid token
172 * - these are not to be called directly 208 * - these are not to be called directly
@@ -181,8 +217,8 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
181extern struct fscache_cookie *__fscache_acquire_cookie( 217extern struct fscache_cookie *__fscache_acquire_cookie(
182 struct fscache_cookie *, 218 struct fscache_cookie *,
183 const struct fscache_cookie_def *, 219 const struct fscache_cookie_def *,
184 void *); 220 void *, bool);
185extern void __fscache_relinquish_cookie(struct fscache_cookie *, int); 221extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
186extern int __fscache_check_consistency(struct fscache_cookie *); 222extern int __fscache_check_consistency(struct fscache_cookie *);
187extern void __fscache_update_cookie(struct fscache_cookie *); 223extern void __fscache_update_cookie(struct fscache_cookie *);
188extern int __fscache_attr_changed(struct fscache_cookie *); 224extern int __fscache_attr_changed(struct fscache_cookie *);
@@ -211,6 +247,9 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
211 struct inode *); 247 struct inode *);
212extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, 248extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
213 struct list_head *pages); 249 struct list_head *pages);
250extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
251extern void __fscache_enable_cookie(struct fscache_cookie *,
252 bool (*)(void *), void *);
214 253
215/** 254/**
216 * fscache_register_netfs - Register a filesystem as desiring caching services 255 * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -289,6 +328,7 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag)
289 * @def: A description of the cache object, including callback operations 328 * @def: A description of the cache object, including callback operations
290 * @netfs_data: An arbitrary piece of data to be kept in the cookie to 329 * @netfs_data: An arbitrary piece of data to be kept in the cookie to
291 * represent the cache object to the netfs 330 * represent the cache object to the netfs
331 * @enable: Whether or not to enable a data cookie immediately
292 * 332 *
293 * This function is used to inform FS-Cache about part of an index hierarchy 333 * This function is used to inform FS-Cache about part of an index hierarchy
294 * that can be used to locate files. This is done by requesting a cookie for 334 * that can be used to locate files. This is done by requesting a cookie for
@@ -301,10 +341,12 @@ static inline
301struct fscache_cookie *fscache_acquire_cookie( 341struct fscache_cookie *fscache_acquire_cookie(
302 struct fscache_cookie *parent, 342 struct fscache_cookie *parent,
303 const struct fscache_cookie_def *def, 343 const struct fscache_cookie_def *def,
304 void *netfs_data) 344 void *netfs_data,
345 bool enable)
305{ 346{
306 if (fscache_cookie_valid(parent)) 347 if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
307 return __fscache_acquire_cookie(parent, def, netfs_data); 348 return __fscache_acquire_cookie(parent, def, netfs_data,
349 enable);
308 else 350 else
309 return NULL; 351 return NULL;
310} 352}
@@ -322,7 +364,7 @@ struct fscache_cookie *fscache_acquire_cookie(
322 * description. 364 * description.
323 */ 365 */
324static inline 366static inline
325void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) 367void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
326{ 368{
327 if (fscache_cookie_valid(cookie)) 369 if (fscache_cookie_valid(cookie))
328 __fscache_relinquish_cookie(cookie, retire); 370 __fscache_relinquish_cookie(cookie, retire);
@@ -341,7 +383,7 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
341static inline 383static inline
342int fscache_check_consistency(struct fscache_cookie *cookie) 384int fscache_check_consistency(struct fscache_cookie *cookie)
343{ 385{
344 if (fscache_cookie_valid(cookie)) 386 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
345 return __fscache_check_consistency(cookie); 387 return __fscache_check_consistency(cookie);
346 else 388 else
347 return 0; 389 return 0;
@@ -360,7 +402,7 @@ int fscache_check_consistency(struct fscache_cookie *cookie)
360static inline 402static inline
361void fscache_update_cookie(struct fscache_cookie *cookie) 403void fscache_update_cookie(struct fscache_cookie *cookie)
362{ 404{
363 if (fscache_cookie_valid(cookie)) 405 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
364 __fscache_update_cookie(cookie); 406 __fscache_update_cookie(cookie);
365} 407}
366 408
@@ -407,7 +449,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie)
407static inline 449static inline
408int fscache_attr_changed(struct fscache_cookie *cookie) 450int fscache_attr_changed(struct fscache_cookie *cookie)
409{ 451{
410 if (fscache_cookie_valid(cookie)) 452 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
411 return __fscache_attr_changed(cookie); 453 return __fscache_attr_changed(cookie);
412 else 454 else
413 return -ENOBUFS; 455 return -ENOBUFS;
@@ -429,7 +471,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie)
429static inline 471static inline
430void fscache_invalidate(struct fscache_cookie *cookie) 472void fscache_invalidate(struct fscache_cookie *cookie)
431{ 473{
432 if (fscache_cookie_valid(cookie)) 474 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
433 __fscache_invalidate(cookie); 475 __fscache_invalidate(cookie);
434} 476}
435 477
@@ -503,7 +545,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
503 void *context, 545 void *context,
504 gfp_t gfp) 546 gfp_t gfp)
505{ 547{
506 if (fscache_cookie_valid(cookie)) 548 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
507 return __fscache_read_or_alloc_page(cookie, page, end_io_func, 549 return __fscache_read_or_alloc_page(cookie, page, end_io_func,
508 context, gfp); 550 context, gfp);
509 else 551 else
@@ -554,7 +596,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
554 void *context, 596 void *context,
555 gfp_t gfp) 597 gfp_t gfp)
556{ 598{
557 if (fscache_cookie_valid(cookie)) 599 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
558 return __fscache_read_or_alloc_pages(cookie, mapping, pages, 600 return __fscache_read_or_alloc_pages(cookie, mapping, pages,
559 nr_pages, end_io_func, 601 nr_pages, end_io_func,
560 context, gfp); 602 context, gfp);
@@ -585,7 +627,7 @@ int fscache_alloc_page(struct fscache_cookie *cookie,
585 struct page *page, 627 struct page *page,
586 gfp_t gfp) 628 gfp_t gfp)
587{ 629{
588 if (fscache_cookie_valid(cookie)) 630 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
589 return __fscache_alloc_page(cookie, page, gfp); 631 return __fscache_alloc_page(cookie, page, gfp);
590 else 632 else
591 return -ENOBUFS; 633 return -ENOBUFS;
@@ -634,7 +676,7 @@ int fscache_write_page(struct fscache_cookie *cookie,
634 struct page *page, 676 struct page *page,
635 gfp_t gfp) 677 gfp_t gfp)
636{ 678{
637 if (fscache_cookie_valid(cookie)) 679 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
638 return __fscache_write_page(cookie, page, gfp); 680 return __fscache_write_page(cookie, page, gfp);
639 else 681 else
640 return -ENOBUFS; 682 return -ENOBUFS;
@@ -744,4 +786,47 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
744 __fscache_uncache_all_inode_pages(cookie, inode); 786 __fscache_uncache_all_inode_pages(cookie, inode);
745} 787}
746 788
789/**
790 * fscache_disable_cookie - Disable a cookie
791 * @cookie: The cookie representing the cache object
792 * @invalidate: Invalidate the backing object
793 *
794 * Disable a cookie from accepting further alloc, read, write, invalidate,
795 * update or acquire operations. Outstanding operations can still be waited
796 * upon and pages can still be uncached and the cookie relinquished.
797 *
798 * This will not return until all outstanding operations have completed.
799 *
800 * If @invalidate is set, then the backing object will be invalidated and
801 * detached, otherwise it will just be detached.
802 */
803static inline
804void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
805{
806 if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
807 __fscache_disable_cookie(cookie, invalidate);
808}
809
810/**
811 * fscache_enable_cookie - Reenable a cookie
812 * @cookie: The cookie representing the cache object
813 * @can_enable: A function to permit enablement once lock is held
814 * @data: Data for can_enable()
815 *
816 * Reenable a previously disabled cookie, allowing it to accept further alloc,
817 * read, write, invalidate, update or acquire operations. An attempt will be
818 * made to immediately reattach the cookie to a backing object.
819 *
820 * The can_enable() function is called (if not NULL) once the enablement lock
821 * is held to rule on whether enablement is still permitted to go ahead.
822 */
823static inline
824void fscache_enable_cookie(struct fscache_cookie *cookie,
825 bool (*can_enable)(void *data),
826 void *data)
827{
828 if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
829 __fscache_enable_cookie(cookie, can_enable, data);
830}
831
747#endif /* _LINUX_FSCACHE_H */ 832#endif /* _LINUX_FSCACHE_H */
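Taken together, the fscache.h changes make cookie acquisition take an enable flag, turn the relinquish retire argument into a bool, and add enable/disable calls. A fragmentary sketch of the updated netfs-facing flow (example_netfs, example_cookie_def and netfs_priv are placeholders, not from this patch):

	struct fscache_cookie *cookie;

	cookie = fscache_acquire_cookie(example_netfs.primary_index,
					&example_cookie_def,
					netfs_priv,
					true);	/* new: enable the cookie immediately */

	fscache_disable_cookie(cookie, false);	/* stop further I/O; detach but don't invalidate */
	fscache_enable_cookie(cookie, NULL, NULL); /* try to reattach a backing object */

	fscache_relinquish_cookie(cookie, false); /* retire flag is now a bool */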
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9f15c0064c50..31ea4b428360 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -533,11 +533,11 @@ static inline int ftrace_force_update(void) { return 0; }
533static inline void ftrace_disable_daemon(void) { } 533static inline void ftrace_disable_daemon(void) { }
534static inline void ftrace_enable_daemon(void) { } 534static inline void ftrace_enable_daemon(void) { }
535static inline void ftrace_release_mod(struct module *mod) {} 535static inline void ftrace_release_mod(struct module *mod) {}
536static inline int register_ftrace_command(struct ftrace_func_command *cmd) 536static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
537{ 537{
538 return -EINVAL; 538 return -EINVAL;
539} 539}
540static inline int unregister_ftrace_command(char *cmd_name) 540static inline __init int unregister_ftrace_command(char *cmd_name)
541{ 541{
542 return -EINVAL; 542 return -EINVAL;
543} 543}
@@ -721,6 +721,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
721extern char __irqentry_text_start[]; 721extern char __irqentry_text_start[];
722extern char __irqentry_text_end[]; 722extern char __irqentry_text_end[];
723 723
724#define FTRACE_NOTRACE_DEPTH 65536
724#define FTRACE_RETFUNC_DEPTH 50 725#define FTRACE_RETFUNC_DEPTH 50
725#define FTRACE_RETSTACK_ALLOC_SIZE 32 726#define FTRACE_RETSTACK_ALLOC_SIZE 32
726extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, 727extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5eaa746735ff..8c9b7a1c4138 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -202,6 +202,7 @@ enum {
202 TRACE_EVENT_FL_NO_SET_FILTER_BIT, 202 TRACE_EVENT_FL_NO_SET_FILTER_BIT,
203 TRACE_EVENT_FL_IGNORE_ENABLE_BIT, 203 TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
204 TRACE_EVENT_FL_WAS_ENABLED_BIT, 204 TRACE_EVENT_FL_WAS_ENABLED_BIT,
205 TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
205}; 206};
206 207
207/* 208/*
@@ -213,6 +214,7 @@ enum {
213 * WAS_ENABLED - Set and stays set when an event was ever enabled 214 * WAS_ENABLED - Set and stays set when an event was ever enabled
214 * (used for module unloading, if a module event is enabled, 215 * (used for module unloading, if a module event is enabled,
215 * it is best to clear the buffers that used it). 216 * it is best to clear the buffers that used it).
217 * USE_CALL_FILTER - For ftrace internal events, don't use file filter
216 */ 218 */
217enum { 219enum {
218 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), 220 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -220,6 +222,7 @@ enum {
220 TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), 222 TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
221 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), 223 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
222 TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), 224 TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
225 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
223}; 226};
224 227
225struct ftrace_event_call { 228struct ftrace_event_call {
@@ -238,12 +241,16 @@ struct ftrace_event_call {
238 * bit 2: failed to apply filter 241 * bit 2: failed to apply filter
239 * bit 3: ftrace internal event (do not enable) 242 * bit 3: ftrace internal event (do not enable)
240 * bit 4: Event was enabled by module 243 * bit 4: Event was enabled by module
244 * bit 5: use call filter rather than file filter
241 */ 245 */
242 int flags; /* static flags of different events */ 246 int flags; /* static flags of different events */
243 247
244#ifdef CONFIG_PERF_EVENTS 248#ifdef CONFIG_PERF_EVENTS
245 int perf_refcount; 249 int perf_refcount;
246 struct hlist_head __percpu *perf_events; 250 struct hlist_head __percpu *perf_events;
251
252 int (*perf_perm)(struct ftrace_event_call *,
253 struct perf_event *);
247#endif 254#endif
248}; 255};
249 256
@@ -253,6 +260,8 @@ struct ftrace_subsystem_dir;
253enum { 260enum {
254 FTRACE_EVENT_FL_ENABLED_BIT, 261 FTRACE_EVENT_FL_ENABLED_BIT,
255 FTRACE_EVENT_FL_RECORDED_CMD_BIT, 262 FTRACE_EVENT_FL_RECORDED_CMD_BIT,
263 FTRACE_EVENT_FL_FILTERED_BIT,
264 FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
256 FTRACE_EVENT_FL_SOFT_MODE_BIT, 265 FTRACE_EVENT_FL_SOFT_MODE_BIT,
257 FTRACE_EVENT_FL_SOFT_DISABLED_BIT, 266 FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
258}; 267};
@@ -261,6 +270,8 @@ enum {
261 * Ftrace event file flags: 270 * Ftrace event file flags:
262 * ENABLED - The event is enabled 271 * ENABLED - The event is enabled
263 * RECORDED_CMD - The comms should be recorded at sched_switch 272 * RECORDED_CMD - The comms should be recorded at sched_switch
273 * FILTERED - The event has a filter attached
274 * NO_SET_FILTER - Set when filter has error and is to be ignored
264 * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED 275 * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
265 * SOFT_DISABLED - When set, do not trace the event (even though its 276 * SOFT_DISABLED - When set, do not trace the event (even though its
266 * tracepoint may be enabled) 277 * tracepoint may be enabled)
@@ -268,6 +279,8 @@ enum {
268enum { 279enum {
269 FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT), 280 FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
270 FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT), 281 FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
282 FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
283 FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
271 FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT), 284 FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
272 FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT), 285 FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
273}; 286};
@@ -275,6 +288,7 @@ enum {
275struct ftrace_event_file { 288struct ftrace_event_file {
276 struct list_head list; 289 struct list_head list;
277 struct ftrace_event_call *event_call; 290 struct ftrace_event_call *event_call;
291 struct event_filter *filter;
278 struct dentry *dir; 292 struct dentry *dir;
279 struct trace_array *tr; 293 struct trace_array *tr;
280 struct ftrace_subsystem_dir *system; 294 struct ftrace_subsystem_dir *system;
@@ -306,16 +320,33 @@ struct ftrace_event_file {
306 } \ 320 } \
307 early_initcall(trace_init_flags_##name); 321 early_initcall(trace_init_flags_##name);
308 322
323#define __TRACE_EVENT_PERF_PERM(name, expr...) \
324 static int perf_perm_##name(struct ftrace_event_call *tp_event, \
325 struct perf_event *p_event) \
326 { \
327 return ({ expr; }); \
328 } \
329 static int __init trace_init_perf_perm_##name(void) \
330 { \
331 event_##name.perf_perm = &perf_perm_##name; \
332 return 0; \
333 } \
334 early_initcall(trace_init_perf_perm_##name);
335
309#define PERF_MAX_TRACE_SIZE 2048 336#define PERF_MAX_TRACE_SIZE 2048
310 337
311#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ 338#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
312 339
313extern void destroy_preds(struct ftrace_event_call *call); 340extern void destroy_preds(struct ftrace_event_file *file);
341extern void destroy_call_preds(struct ftrace_event_call *call);
314extern int filter_match_preds(struct event_filter *filter, void *rec); 342extern int filter_match_preds(struct event_filter *filter, void *rec);
315extern int filter_current_check_discard(struct ring_buffer *buffer, 343
316 struct ftrace_event_call *call, 344extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
317 void *rec, 345 struct ring_buffer *buffer,
318 struct ring_buffer_event *event); 346 struct ring_buffer_event *event);
347extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
348 struct ring_buffer *buffer,
349 struct ring_buffer_event *event);
319 350
320enum { 351enum {
321 FILTER_OTHER = 0, 352 FILTER_OTHER = 0,
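The new perf_perm hook lets an event veto perf usage on a per-perf_event basis. A hypothetical use of the __TRACE_EVENT_PERF_PERM() helper (example_event would have to be defined via TRACE_EVENT() elsewhere; the policy shown is purely illustrative):

__TRACE_EVENT_PERF_PERM(example_event,
	is_sampling_event(p_event) ? -EPERM : 0);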
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index f8d41cb1cbe0..1eda33d7cb10 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -94,6 +94,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
94} 94}
95extern void gen_pool_destroy(struct gen_pool *); 95extern void gen_pool_destroy(struct gen_pool *);
96extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); 96extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
97extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
98 dma_addr_t *dma);
97extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); 99extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
98extern void gen_pool_for_each_chunk(struct gen_pool *, 100extern void gen_pool_for_each_chunk(struct gen_pool *,
99 void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); 101 void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
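gen_pool_dma_alloc() returns the CPU address of an allocation and reports the matching DMA address through its third argument. A sketch under assumed inputs (cpu_base/dma_base/region_size describe a coherent region set up elsewhere):

static void *example_alloc(void *cpu_base, dma_addr_t dma_base,
			   size_t region_size, dma_addr_t *dma)
{
	struct gen_pool *pool = gen_pool_create(8, -1);	/* 256-byte granules */

	if (!pool)
		return NULL;
	if (gen_pool_add_virt(pool, (unsigned long)cpu_base, dma_base,
			      region_size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	/* new helper: CPU pointer returned, *dma filled with the bus address */
	return gen_pool_dma_alloc(pool, 1024, dma);
}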
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 023bc346b877..c0894dd8827b 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -273,49 +273,40 @@ static struct genl_family ZZZ_genl_family __read_mostly = {
273 * Magic: define multicast groups 273 * Magic: define multicast groups
274 * Magic: define multicast group registration helper 274 * Magic: define multicast group registration helper
275 */ 275 */
276#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
277static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
278#undef GENL_mc_group
279#define GENL_mc_group(group) { .name = #group, },
280#include GENL_MAGIC_INCLUDE_FILE
281};
282
283enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
284#undef GENL_mc_group
285#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
286#include GENL_MAGIC_INCLUDE_FILE
287};
288
276#undef GENL_mc_group 289#undef GENL_mc_group
277#define GENL_mc_group(group) \ 290#define GENL_mc_group(group) \
278static struct genl_multicast_group \
279CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group) __read_mostly = { \
280 .name = #group, \
281}; \
282static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ 291static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
283 struct sk_buff *skb, gfp_t flags) \ 292 struct sk_buff *skb, gfp_t flags) \
284{ \ 293{ \
285 unsigned int group_id = \ 294 unsigned int group_id = \
286 CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id; \ 295 CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
287 if (!group_id) \ 296 return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
288 return -EINVAL; \ 297 group_id, flags); \
289 return genlmsg_multicast(skb, 0, group_id, flags); \
290} 298}
291 299
292#include GENL_MAGIC_INCLUDE_FILE 300#include GENL_MAGIC_INCLUDE_FILE
293 301
294int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
295{
296 int err = genl_register_family_with_ops(&ZZZ_genl_family,
297 ZZZ_genl_ops, ARRAY_SIZE(ZZZ_genl_ops));
298 if (err)
299 return err;
300#undef GENL_mc_group
301#define GENL_mc_group(group) \
302 err = genl_register_mc_group(&ZZZ_genl_family, \
303 &CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group)); \
304 if (err) \
305 goto fail; \
306 else \
307 pr_info("%s: mcg %s: %u\n", #group, \
308 __stringify(GENL_MAGIC_FAMILY), \
309 CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id);
310
311#include GENL_MAGIC_INCLUDE_FILE
312
313#undef GENL_mc_group 302#undef GENL_mc_group
314#define GENL_mc_group(group) 303#define GENL_mc_group(group)
315 return 0; 304
316fail: 305int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
317 genl_unregister_family(&ZZZ_genl_family); 306{
318 return err; 307 return genl_register_family_with_ops_groups(&ZZZ_genl_family, \
308 ZZZ_genl_ops, \
309 ZZZ_genl_mcgrps);
319} 310}
320 311
321void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) 312void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
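With this rework, listing a group in the GENL_MAGIC_INCLUDE_FILE, e.g. GENL_mc_group(events), now produces an entry in the static ZZZ_genl_mcgrps array, a <FAMILY>_group_events index, and a multicast helper that expands roughly as below (names illustrative); registration then hands the whole array to genl_register_family_with_ops_groups() in one call:

static int example_genl_multicast_events(struct sk_buff *skb, gfp_t flags)
{
	/* group index is relative to the family's mcgrps array */
	return genlmsg_multicast(&ZZZ_genl_family, skb, 0,
				 example_group_events, flags);
}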
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 552e3f46e4a3..13dfd24d01ab 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -16,14 +16,17 @@
16#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) 16#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
17#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) 17#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
18 18
19/* Gpio pin is active-low */
20#define GPIOF_ACTIVE_LOW (1 << 2)
21
19/* Gpio pin is open drain */ 22/* Gpio pin is open drain */
20#define GPIOF_OPEN_DRAIN (1 << 2) 23#define GPIOF_OPEN_DRAIN (1 << 3)
21 24
22/* Gpio pin is open source */ 25/* Gpio pin is open source */
23#define GPIOF_OPEN_SOURCE (1 << 3) 26#define GPIOF_OPEN_SOURCE (1 << 4)
24 27
25#define GPIOF_EXPORT (1 << 4) 28#define GPIOF_EXPORT (1 << 5)
26#define GPIOF_EXPORT_CHANGEABLE (1 << 5) 29#define GPIOF_EXPORT_CHANGEABLE (1 << 6)
27#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT) 30#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
28#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE) 31#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
29 32
@@ -74,12 +77,22 @@ static inline int irq_to_gpio(unsigned int irq)
74 77
75#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */ 78#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
76 79
80/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
81
82struct device;
83
84int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
85int devm_gpio_request_one(struct device *dev, unsigned gpio,
86 unsigned long flags, const char *label);
87void devm_gpio_free(struct device *dev, unsigned int gpio);
88
77#else /* ! CONFIG_GPIOLIB */ 89#else /* ! CONFIG_GPIOLIB */
78 90
79#include <linux/kernel.h> 91#include <linux/kernel.h>
80#include <linux/types.h> 92#include <linux/types.h>
81#include <linux/errno.h> 93#include <linux/errno.h>
82#include <linux/bug.h> 94#include <linux/bug.h>
95#include <linux/pinctrl/pinctrl.h>
83 96
84struct device; 97struct device;
85struct gpio_chip; 98struct gpio_chip;
@@ -204,6 +217,18 @@ static inline int gpio_to_irq(unsigned gpio)
204 return -EINVAL; 217 return -EINVAL;
205} 218}
206 219
220static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
221{
222 WARN_ON(1);
223 return -EINVAL;
224}
225
226static inline void gpio_unlock_as_irq(struct gpio_chip *chip,
227 unsigned int offset)
228{
229 WARN_ON(1);
230}
231
207static inline int irq_to_gpio(unsigned irq) 232static inline int irq_to_gpio(unsigned irq)
208{ 233{
209 /* irq can never have been returned from gpio_to_irq() */ 234 /* irq can never have been returned from gpio_to_irq() */
@@ -220,20 +245,40 @@ gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
220 return -EINVAL; 245 return -EINVAL;
221} 246}
222 247
248static inline int
249gpiochip_add_pingroup_range(struct gpio_chip *chip,
250 struct pinctrl_dev *pctldev,
251 unsigned int gpio_offset, const char *pin_group)
252{
253 WARN_ON(1);
254 return -EINVAL;
255}
256
223static inline void 257static inline void
224gpiochip_remove_pin_ranges(struct gpio_chip *chip) 258gpiochip_remove_pin_ranges(struct gpio_chip *chip)
225{ 259{
226 WARN_ON(1); 260 WARN_ON(1);
227} 261}
228 262
229#endif /* ! CONFIG_GPIOLIB */ 263static inline int devm_gpio_request(struct device *dev, unsigned gpio,
264 const char *label)
265{
266 WARN_ON(1);
267 return -EINVAL;
268}
230 269
231struct device; 270static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
271 unsigned long flags, const char *label)
272{
273 WARN_ON(1);
274 return -EINVAL;
275}
232 276
233/* bindings for managed devices that want to request gpios */ 277static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
234int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); 278{
235int devm_gpio_request_one(struct device *dev, unsigned gpio, 279 WARN_ON(1);
236 unsigned long flags, const char *label); 280}
237void devm_gpio_free(struct device *dev, unsigned int gpio); 281
282#endif /* ! CONFIG_GPIOLIB */
238 283
239#endif /* __LINUX_GPIO_H */ 284#endif /* __LINUX_GPIO_H */
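GPIOF_ACTIVE_LOW joins the request flags (shifting the others up), and the devm_gpio_* declarations now live under CONFIG_GPIOLIB with WARN_ON() stubs otherwise. A fragment from a hypothetical probe() requesting an active-low input with the new flag (button_gpio and the label are placeholders):

	err = devm_gpio_request_one(&pdev->dev, button_gpio,
				    GPIOF_IN | GPIOF_ACTIVE_LOW,
				    "example-button");
	if (err)
		return err;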
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
new file mode 100644
index 000000000000..4d34dbbbad4d
--- /dev/null
+++ b/include/linux/gpio/consumer.h
@@ -0,0 +1,253 @@
1#ifndef __LINUX_GPIO_CONSUMER_H
2#define __LINUX_GPIO_CONSUMER_H
3
4#include <linux/err.h>
5#include <linux/kernel.h>
6
7#ifdef CONFIG_GPIOLIB
8
9struct device;
10struct gpio_chip;
11
12/**
13 * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
14 * preferable to the old integer-based handles.
15 *
16 * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
17 * until the GPIO is released.
18 */
19struct gpio_desc;
20
21/* Acquire and dispose GPIOs */
22struct gpio_desc *__must_check gpiod_get(struct device *dev,
23 const char *con_id);
24struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
25 const char *con_id,
26 unsigned int idx);
27void gpiod_put(struct gpio_desc *desc);
28
29struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
30 const char *con_id);
31struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
32 const char *con_id,
33 unsigned int idx);
34void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
35
36int gpiod_get_direction(const struct gpio_desc *desc);
37int gpiod_direction_input(struct gpio_desc *desc);
38int gpiod_direction_output(struct gpio_desc *desc, int value);
39
40/* Value get/set from non-sleeping context */
41int gpiod_get_value(const struct gpio_desc *desc);
42void gpiod_set_value(struct gpio_desc *desc, int value);
43int gpiod_get_raw_value(const struct gpio_desc *desc);
44void gpiod_set_raw_value(struct gpio_desc *desc, int value);
45
46/* Value get/set from sleeping context */
47int gpiod_get_value_cansleep(const struct gpio_desc *desc);
48void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
49int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
50void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
51
52int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
53
54int gpiod_is_active_low(const struct gpio_desc *desc);
55int gpiod_cansleep(const struct gpio_desc *desc);
56
57int gpiod_to_irq(const struct gpio_desc *desc);
58
59/* Convert between the old gpio_ and new gpiod_ interfaces */
60struct gpio_desc *gpio_to_desc(unsigned gpio);
61int desc_to_gpio(const struct gpio_desc *desc);
62struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
63
64#else /* CONFIG_GPIOLIB */
65
66static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
67 const char *con_id)
68{
69 return ERR_PTR(-ENOSYS);
70}
71static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
72 const char *con_id,
73 unsigned int idx)
74{
75 return ERR_PTR(-ENOSYS);
76}
77static inline void gpiod_put(struct gpio_desc *desc)
78{
79 might_sleep();
80
81 /* GPIO can never have been requested */
82 WARN_ON(1);
83}
84
85static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
86 const char *con_id)
87{
88 return ERR_PTR(-ENOSYS);
89}
90static inline
91struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
92 const char *con_id,
93 unsigned int idx)
94{
95 return ERR_PTR(-ENOSYS);
96}
97static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
98{
99 might_sleep();
100
101 /* GPIO can never have been requested */
102 WARN_ON(1);
103}
104
105
106static inline int gpiod_get_direction(const struct gpio_desc *desc)
107{
108 /* GPIO can never have been requested */
109 WARN_ON(1);
110 return -ENOSYS;
111}
112static inline int gpiod_direction_input(struct gpio_desc *desc)
113{
114 /* GPIO can never have been requested */
115 WARN_ON(1);
116 return -ENOSYS;
117}
118static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
119{
120 /* GPIO can never have been requested */
121 WARN_ON(1);
122 return -ENOSYS;
123}
124
125
126static inline int gpiod_get_value(const struct gpio_desc *desc)
127{
128 /* GPIO can never have been requested */
129 WARN_ON(1);
130 return 0;
131}
132static inline void gpiod_set_value(struct gpio_desc *desc, int value)
133{
134 /* GPIO can never have been requested */
135 WARN_ON(1);
136}
137static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
138{
139 /* GPIO can never have been requested */
140 WARN_ON(1);
141 return 0;
142}
143static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
144{
145 /* GPIO can never have been requested */
146 WARN_ON(1);
147}
148
149static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
150{
151 /* GPIO can never have been requested */
152 WARN_ON(1);
153 return 0;
154}
155static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
156{
157 /* GPIO can never have been requested */
158 WARN_ON(1);
159}
160static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
161{
162 /* GPIO can never have been requested */
163 WARN_ON(1);
164 return 0;
165}
166static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
167 int value)
168{
169 /* GPIO can never have been requested */
170 WARN_ON(1);
171}
172
173static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
174{
175 /* GPIO can never have been requested */
176 WARN_ON(1);
177 return -ENOSYS;
178}
179
180static inline int gpiod_is_active_low(const struct gpio_desc *desc)
181{
182 /* GPIO can never have been requested */
183 WARN_ON(1);
184 return 0;
185}
186static inline int gpiod_cansleep(const struct gpio_desc *desc)
187{
188 /* GPIO can never have been requested */
189 WARN_ON(1);
190 return 0;
191}
192
193static inline int gpiod_to_irq(const struct gpio_desc *desc)
194{
195 /* GPIO can never have been requested */
196 WARN_ON(1);
197 return -EINVAL;
198}
199
200static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
201{
202 return ERR_PTR(-EINVAL);
203}
204static inline int desc_to_gpio(const struct gpio_desc *desc)
205{
206 /* GPIO can never have been requested */
207 WARN_ON(1);
208 return -EINVAL;
209}
210static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
211{
212 /* GPIO can never have been requested */
213 WARN_ON(1);
214 return ERR_PTR(-ENODEV);
215}
216
217
218#endif /* CONFIG_GPIOLIB */
219
220#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
221
222int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
223int gpiod_export_link(struct device *dev, const char *name,
224 struct gpio_desc *desc);
225int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
226void gpiod_unexport(struct gpio_desc *desc);
227
228#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
229
230static inline int gpiod_export(struct gpio_desc *desc,
231 bool direction_may_change)
232{
233 return -ENOSYS;
234}
235
236static inline int gpiod_export_link(struct device *dev, const char *name,
237 struct gpio_desc *desc)
238{
239 return -ENOSYS;
240}
241
242static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
243{
244 return -ENOSYS;
245}
246
247static inline void gpiod_unexport(struct gpio_desc *desc)
248{
249}
250
251#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
252
253#endif
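The consumer API above revolves around opaque gpio_desc handles rather than integer GPIO numbers. A minimal sketch of a platform driver using it (the "led" con_id and example_led_probe() are illustrative):

static int example_led_probe(struct platform_device *pdev)
{
	struct gpio_desc *led;
	int ret;

	led = devm_gpiod_get(&pdev->dev, "led");
	if (IS_ERR(led))
		return PTR_ERR(led);

	ret = gpiod_direction_output(led, 0);
	if (ret)
		return ret;

	/* logical value: active-low polarity is handled by the core */
	gpiod_set_value(led, 1);
	return 0;
}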
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
new file mode 100644
index 000000000000..3ea2cf6b0e6c
--- /dev/null
+++ b/include/linux/gpio/driver.h
@@ -0,0 +1,194 @@
1#ifndef __LINUX_GPIO_DRIVER_H
2#define __LINUX_GPIO_DRIVER_H
3
4#include <linux/types.h>
5#include <linux/module.h>
6
7struct device;
8struct gpio_desc;
9struct of_phandle_args;
10struct device_node;
11struct seq_file;
12
13/**
14 * struct gpio_chip - abstract a GPIO controller
15 * @label: for diagnostics
16 * @dev: optional device providing the GPIOs
17 * @owner: helps prevent removal of modules exporting active GPIOs
18 * @list: links gpio_chips together for traversal
19 * @request: optional hook for chip-specific activation, such as
20 * enabling module power and clock; may sleep
21 * @free: optional hook for chip-specific deactivation, such as
22 * disabling module power and clock; may sleep
23 * @get_direction: returns direction for signal "offset", 0=out, 1=in,
24 * (same as GPIOF_DIR_XXX), or negative error
25 * @direction_input: configures signal "offset" as input, or returns error
26 * @direction_output: configures signal "offset" as output, or returns error
27 * @get: returns value for signal "offset"; for output signals this
28 * returns either the value actually sensed, or zero
29 * @set: assigns output value for signal "offset"
30 * @set_debounce: optional hook for setting debounce time for specified gpio in
31 * interrupt triggered gpio chips
32 * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
33 * implementation may not sleep
34 * @dbg_show: optional routine to show contents in debugfs; default code
35 * will be used when this is omitted, but custom code can show extra
36 * state (such as pullup/pulldown configuration).
37 * @base: identifies the first GPIO number handled by this chip; or, if
38 * negative during registration, requests dynamic ID allocation.
39 * @ngpio: the number of GPIOs handled by this controller; the last GPIO
40 * handled is (base + ngpio - 1).
41 * @desc: array of ngpio descriptors. Private.
42 * @can_sleep: flag must be set iff get()/set() methods sleep, as they
43 * must while accessing GPIO expander chips over I2C or SPI
44 * @names: if set, must be an array of strings to use as alternative
45 * names for the GPIOs in this chip. Any entry in the array
46 * may be NULL if there is no alias for the GPIO, however the
47 * array must be @ngpio entries long. A name can include a single printk
48 * format specifier for an unsigned int. It is substituted by the actual
49 * number of the gpio.
50 *
51 * A gpio_chip can help platforms abstract various sources of GPIOs so
52 * they can all be accessed through a common programming interface.
53 * Example sources would be SOC controllers, FPGAs, multifunction
54 * chips, dedicated GPIO expanders, and so on.
55 *
56 * Each chip controls a number of signals, identified in method calls
57 * by "offset" values in the range 0..(@ngpio - 1). When those signals
58 * are referenced through calls like gpio_get_value(gpio), the offset
59 * is calculated by subtracting @base from the gpio number.
60 */
61struct gpio_chip {
62 const char *label;
63 struct device *dev;
64 struct module *owner;
65 struct list_head list;
66
67 int (*request)(struct gpio_chip *chip,
68 unsigned offset);
69 void (*free)(struct gpio_chip *chip,
70 unsigned offset);
71 int (*get_direction)(struct gpio_chip *chip,
72 unsigned offset);
73 int (*direction_input)(struct gpio_chip *chip,
74 unsigned offset);
75 int (*direction_output)(struct gpio_chip *chip,
76 unsigned offset, int value);
77 int (*get)(struct gpio_chip *chip,
78 unsigned offset);
79 void (*set)(struct gpio_chip *chip,
80 unsigned offset, int value);
81 int (*set_debounce)(struct gpio_chip *chip,
82 unsigned offset,
83 unsigned debounce);
84
85 int (*to_irq)(struct gpio_chip *chip,
86 unsigned offset);
87
88 void (*dbg_show)(struct seq_file *s,
89 struct gpio_chip *chip);
90 int base;
91 u16 ngpio;
92 struct gpio_desc *desc;
93 const char *const *names;
94 unsigned can_sleep:1;
95 unsigned exported:1;
96
97#if defined(CONFIG_OF_GPIO)
98 /*
99 * If CONFIG_OF is enabled, then all GPIO controllers described in the
100 * device tree automatically may have an OF translation
101 */
102 struct device_node *of_node;
103 int of_gpio_n_cells;
104 int (*of_xlate)(struct gpio_chip *gc,
105 const struct of_phandle_args *gpiospec, u32 *flags);
106#endif
107#ifdef CONFIG_PINCTRL
108 /*
109 * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
110 * describe the actual pin range which they serve in an SoC. This
111 * information would be used by pinctrl subsystem to configure
112 * corresponding pins for gpio usage.
113 */
114 struct list_head pin_ranges;
115#endif
116};
117
118extern const char *gpiochip_is_requested(struct gpio_chip *chip,
119 unsigned offset);
120
121/* add/remove chips */
122extern int gpiochip_add(struct gpio_chip *chip);
123extern int __must_check gpiochip_remove(struct gpio_chip *chip);
124extern struct gpio_chip *gpiochip_find(void *data,
125 int (*match)(struct gpio_chip *chip, void *data));
126
127/* lock/unlock as IRQ */
128int gpiod_lock_as_irq(struct gpio_desc *desc);
129void gpiod_unlock_as_irq(struct gpio_desc *desc);
130
131enum gpio_lookup_flags {
132 GPIO_ACTIVE_HIGH = (0 << 0),
133 GPIO_ACTIVE_LOW = (1 << 0),
134 GPIO_OPEN_DRAIN = (1 << 1),
135 GPIO_OPEN_SOURCE = (1 << 2),
136};
137
138/**
139 * Lookup table for associating GPIOs to specific devices and functions using
140 * platform data.
141 */
142struct gpiod_lookup {
143 struct list_head list;
144 /*
145 * name of the chip the GPIO belongs to
146 */
147 const char *chip_label;
148 /*
149 * hardware number (i.e. relative to the chip) of the GPIO
150 */
151 u16 chip_hwnum;
152 /*
153 * name of device that can claim this GPIO
154 */
155 const char *dev_id;
156 /*
157 * name of the GPIO from the device's point of view
158 */
159 const char *con_id;
160 /*
161 * index of the GPIO in case several GPIOs share the same name
162 */
163 unsigned int idx;
164 /*
165 * mask of GPIO_* values
166 */
167 enum gpio_lookup_flags flags;
168};
169
170/*
171 * Simple definition of a single GPIO under a con_id
172 */
173#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _dev_id, _con_id, _flags) \
174 GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _dev_id, _con_id, 0, _flags)
175
176/*
177 * Use this macro if you need to have several GPIOs under the same con_id.
178 * Each GPIO needs to use a different index and can be accessed using
179 * gpiod_get_index()
180 */
181#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _dev_id, _con_id, _idx, \
182 _flags) \
183{ \
184 .chip_label = _chip_label, \
185 .chip_hwnum = _chip_hwnum, \
186 .dev_id = _dev_id, \
187 .con_id = _con_id, \
188 .idx = _idx, \
189 .flags = _flags, \
190}
191
192void gpiod_add_table(struct gpiod_lookup *table, size_t size);
193
194#endif
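The gpiod_lookup table is how board code maps chip-relative GPIO numbers to the (dev_id, con_id) pairs that gpiod_get() later resolves. A sketch with made-up chip and device names:

static struct gpiod_lookup example_gpio_lookup[] = {
	GPIO_LOOKUP("gpio-bank0", 15, "foo-device.0", "reset", GPIO_ACTIVE_LOW),
	GPIO_LOOKUP_IDX("gpio-bank0", 16, "foo-device.0", "enable", 0,
			GPIO_ACTIVE_HIGH),
};

static void __init example_board_init(void)
{
	gpiod_add_table(example_gpio_lookup, ARRAY_SIZE(example_gpio_lookup));
}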
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 1e041063b226..d9cf963ac832 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -33,7 +33,7 @@ extern void rcu_nmi_exit(void);
33#define __irq_enter() \ 33#define __irq_enter() \
34 do { \ 34 do { \
35 account_irq_enter_time(current); \ 35 account_irq_enter_time(current); \
36 add_preempt_count(HARDIRQ_OFFSET); \ 36 preempt_count_add(HARDIRQ_OFFSET); \
37 trace_hardirq_enter(); \ 37 trace_hardirq_enter(); \
38 } while (0) 38 } while (0)
39 39
@@ -49,7 +49,7 @@ extern void irq_enter(void);
49 do { \ 49 do { \
50 trace_hardirq_exit(); \ 50 trace_hardirq_exit(); \
51 account_irq_exit_time(current); \ 51 account_irq_exit_time(current); \
52 sub_preempt_count(HARDIRQ_OFFSET); \ 52 preempt_count_sub(HARDIRQ_OFFSET); \
53 } while (0) 53 } while (0)
54 54
55/* 55/*
@@ -62,7 +62,7 @@ extern void irq_exit(void);
62 lockdep_off(); \ 62 lockdep_off(); \
63 ftrace_nmi_enter(); \ 63 ftrace_nmi_enter(); \
64 BUG_ON(in_nmi()); \ 64 BUG_ON(in_nmi()); \
65 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ 65 preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
66 rcu_nmi_enter(); \ 66 rcu_nmi_enter(); \
67 trace_hardirq_enter(); \ 67 trace_hardirq_enter(); \
68 } while (0) 68 } while (0)
@@ -72,7 +72,7 @@ extern void irq_exit(void);
72 trace_hardirq_exit(); \ 72 trace_hardirq_exit(); \
73 rcu_nmi_exit(); \ 73 rcu_nmi_exit(); \
74 BUG_ON(!in_nmi()); \ 74 BUG_ON(!in_nmi()); \
75 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ 75 preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
76 ftrace_nmi_exit(); \ 76 ftrace_nmi_exit(); \
77 lockdep_on(); \ 77 lockdep_on(); \
78 } while (0) 78 } while (0)
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index a9df51f5d54c..519b6e2d769e 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -174,6 +174,21 @@ static inline void hash_del_rcu(struct hlist_node *node)
174 member) 174 member)
175 175
176/** 176/**
177 * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
178 * to the same bucket in an rcu enabled hashtable
179 * @name: hashtable to iterate
180 * @obj: the type * to use as a loop cursor for each entry
181 * @member: the name of the hlist_node within the struct
182 * @key: the key of the objects to iterate over
183 *
184 * This is the same as hash_for_each_possible_rcu() except that it does
185 * not do any RCU debugging or tracing.
186 */
187#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
188 hlist_for_each_entry_rcu_notrace(obj, \
189 &name[hash_min(key, HASH_BITS(name))], member)
190
191/**
177 * hash_for_each_possible_safe - iterate over all possible objects hashing to the 192 * hash_for_each_possible_safe - iterate over all possible objects hashing to the
178 * same bucket safe against removals 193 * same bucket safe against removals
179 * @name: hashtable to iterate 194 * @name: hashtable to iterate
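A sketch of the new _notrace lookup, which behaves like hash_for_each_possible_rcu() minus the RCU debugging/tracing hooks and is therefore usable from tracing callbacks; the struct and table below are illustrative, and the caller must still be in an RCU read-side critical section:

struct example_obj {
	int key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(example_table, 6);

static struct example_obj *example_find(int key)
{
	struct example_obj *obj;

	hash_for_each_possible_rcu_notrace(example_table, obj, node, key)
		if (obj->key == key)
			return obj;
	return NULL;
}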
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 32ba45158d39..b914ca3f57ba 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -21,6 +21,8 @@
21 21
22#include <linux/hid.h> 22#include <linux/hid.h>
23#include <linux/hid-sensor-ids.h> 23#include <linux/hid-sensor-ids.h>
24#include <linux/iio/iio.h>
25#include <linux/iio/trigger.h>
24 26
25/** 27/**
26 * struct hid_sensor_hub_attribute_info - Attribute info 28 * struct hid_sensor_hub_attribute_info - Attribute info
@@ -40,6 +42,8 @@ struct hid_sensor_hub_attribute_info {
40 s32 units; 42 s32 units;
41 s32 unit_expo; 43 s32 unit_expo;
42 s32 size; 44 s32 size;
45 s32 logical_minimum;
46 s32 logical_maximum;
43}; 47};
44 48
45/** 49/**
@@ -47,11 +51,13 @@ struct hid_sensor_hub_attribute_info {
47 * @hdev: Stores the hid instance. 51 * @hdev: Stores the hid instance.
48 * @vendor_id: Vendor id of hub device. 52 * @vendor_id: Vendor id of hub device.
49 * @product_id: Product id of hub device. 53 * @product_id: Product id of hub device.
54 * @ref_cnt: Number of MFD clients that have opened this device
50 */ 55 */
51struct hid_sensor_hub_device { 56struct hid_sensor_hub_device {
52 struct hid_device *hdev; 57 struct hid_device *hdev;
53 u32 vendor_id; 58 u32 vendor_id;
54 u32 product_id; 59 u32 product_id;
60 int ref_cnt;
55}; 61};
56 62
57/** 63/**
@@ -74,6 +80,22 @@ struct hid_sensor_hub_callbacks {
74 void *priv); 80 void *priv);
75}; 81};
76 82
83/**
84* sensor_hub_device_open() - Open hub device
85* @hsdev: Hub device instance.
86*
87* Used to open hid device for sensor hub.
88*/
89int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev);
90
91/**
92* sensor_hub_device_close() - Close hub device
93* @hsdev: Hub device instance.
94*
95* Used to close hid device for sensor hub.
96*/
97void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev);
98
77/* Registration functions */ 99/* Registration functions */
78 100
79/** 101/**
@@ -166,6 +188,7 @@ struct hid_sensor_common {
166 struct platform_device *pdev; 188 struct platform_device *pdev;
167 unsigned usage_id; 189 unsigned usage_id;
168 bool data_ready; 190 bool data_ready;
191 struct iio_trigger *trigger;
169 struct hid_sensor_hub_attribute_info poll; 192 struct hid_sensor_hub_attribute_info poll;
170 struct hid_sensor_hub_attribute_info report_state; 193 struct hid_sensor_hub_attribute_info report_state;
171 struct hid_sensor_hub_attribute_info power_state; 194 struct hid_sensor_hub_attribute_info power_state;
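sensor_hub_device_open()/sensor_hub_device_close() give each MFD client refcounted access to the underlying hid device. A sketch of a client bracketing its report traffic with them (example_read_sample is a made-up helper):

static int example_read_sample(struct hid_sensor_hub_device *hsdev)
{
	int ret;

	ret = sensor_hub_device_open(hsdev);
	if (ret)
		return ret;

	/* ... read/write sensor hub reports here ... */

	sensor_hub_device_close(hsdev);
	return 0;
}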
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 4f945d3ed49f..8323775ac21d 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -117,4 +117,16 @@
117#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 117#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
118#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 118#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
119 119
120/* Power state enumerations */
121#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00
122#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01
123#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x02
124#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x03
125#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x04
126#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x05
127
128/* Report State enumerations */
129#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x00
130#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x01
131
120#endif 132#endif
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index f148e4908410..8ec23fb0b412 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -31,11 +31,11 @@ struct hippi_cb {
31 __u32 ifield; 31 __u32 ifield;
32}; 32};
33 33
34extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); 34__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
35extern int hippi_change_mtu(struct net_device *dev, int new_mtu); 35int hippi_change_mtu(struct net_device *dev, int new_mtu);
36extern int hippi_mac_addr(struct net_device *dev, void *p); 36int hippi_mac_addr(struct net_device *dev, void *p);
37extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); 37int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
38extern struct net_device *alloc_hippi_dev(int sizeof_priv); 38struct net_device *alloc_hippi_dev(int sizeof_priv);
39#endif 39#endif
40 40
41#endif /* _LINUX_HIPPIDEVICE_H */ 41#endif /* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644
index 000000000000..f5b9b87ac9a9
--- /dev/null
+++ b/include/linux/host1x.h
@@ -0,0 +1,284 @@
1/*
2 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __LINUX_HOST1X_H
20#define __LINUX_HOST1X_H
21
22#include <linux/device.h>
23#include <linux/types.h>
24
25enum host1x_class {
26 HOST1X_CLASS_HOST1X = 0x1,
27 HOST1X_CLASS_GR2D = 0x51,
28 HOST1X_CLASS_GR2D_SB = 0x52,
29 HOST1X_CLASS_GR3D = 0x60,
30};
31
32struct host1x_client;
33
34struct host1x_client_ops {
35 int (*init)(struct host1x_client *client);
36 int (*exit)(struct host1x_client *client);
37};
38
39struct host1x_client {
40 struct list_head list;
41 struct device *parent;
42 struct device *dev;
43
44 const struct host1x_client_ops *ops;
45
46 enum host1x_class class;
47 struct host1x_channel *channel;
48
49 struct host1x_syncpt **syncpts;
50 unsigned int num_syncpts;
51};
52
53/*
54 * host1x buffer objects
55 */
56
57struct host1x_bo;
58struct sg_table;
59
60struct host1x_bo_ops {
61 struct host1x_bo *(*get)(struct host1x_bo *bo);
62 void (*put)(struct host1x_bo *bo);
63 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
64 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
65 void *(*mmap)(struct host1x_bo *bo);
66 void (*munmap)(struct host1x_bo *bo, void *addr);
67 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
68 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
69};
70
71struct host1x_bo {
72 const struct host1x_bo_ops *ops;
73};
74
75static inline void host1x_bo_init(struct host1x_bo *bo,
76 const struct host1x_bo_ops *ops)
77{
78 bo->ops = ops;
79}
80
81static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
82{
83 return bo->ops->get(bo);
84}
85
86static inline void host1x_bo_put(struct host1x_bo *bo)
87{
88 bo->ops->put(bo);
89}
90
91static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
92 struct sg_table **sgt)
93{
94 return bo->ops->pin(bo, sgt);
95}
96
97static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
98{
99 bo->ops->unpin(bo, sgt);
100}
101
102static inline void *host1x_bo_mmap(struct host1x_bo *bo)
103{
104 return bo->ops->mmap(bo);
105}
106
107static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
108{
109 bo->ops->munmap(bo, addr);
110}
111
112static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
113{
114 return bo->ops->kmap(bo, pagenum);
115}
116
117static inline void host1x_bo_kunmap(struct host1x_bo *bo,
118 unsigned int pagenum, void *addr)
119{
120 bo->ops->kunmap(bo, pagenum, addr);
121}
122
123/*
124 * host1x syncpoints
125 */
126
127#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
128#define HOST1X_SYNCPT_HAS_BASE (1 << 1)
129
130struct host1x_syncpt_base;
131struct host1x_syncpt;
132struct host1x;
133
134struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
135u32 host1x_syncpt_id(struct host1x_syncpt *sp);
136u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
137u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
138int host1x_syncpt_incr(struct host1x_syncpt *sp);
139int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
140 u32 *value);
141struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
142 unsigned long flags);
143void host1x_syncpt_free(struct host1x_syncpt *sp);
144
145struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
146u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
147
148/*
149 * host1x channel
150 */
151
152struct host1x_channel;
153struct host1x_job;
154
155struct host1x_channel *host1x_channel_request(struct device *dev);
156void host1x_channel_free(struct host1x_channel *channel);
157struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
158void host1x_channel_put(struct host1x_channel *channel);
159int host1x_job_submit(struct host1x_job *job);
160
161/*
162 * host1x job
163 */
164
165struct host1x_reloc {
166 struct host1x_bo *cmdbuf;
167 u32 cmdbuf_offset;
168 struct host1x_bo *target;
169 u32 target_offset;
170 u32 shift;
171 u32 pad;
172};
173
174struct host1x_job {
175 /* When refcount goes to zero, job can be freed */
176 struct kref ref;
177
178 /* List entry */
179 struct list_head list;
180
181 /* Channel where job is submitted to */
182 struct host1x_channel *channel;
183
184 u32 client;
185
186 /* Gathers and their memory */
187 struct host1x_job_gather *gathers;
188 unsigned int num_gathers;
189
190 /* Wait checks to be processed at submit time */
191 struct host1x_waitchk *waitchk;
192 unsigned int num_waitchk;
193 u32 waitchk_mask;
194
195 /* Array of handles to be pinned & unpinned */
196 struct host1x_reloc *relocarray;
197 unsigned int num_relocs;
198 struct host1x_job_unpin_data *unpins;
199 unsigned int num_unpins;
200
201 dma_addr_t *addr_phys;
202 dma_addr_t *gather_addr_phys;
203 dma_addr_t *reloc_addr_phys;
204
205 /* Sync point id, number of increments and end related to the submit */
206 u32 syncpt_id;
207 u32 syncpt_incrs;
208 u32 syncpt_end;
209
210 /* Maximum time to wait for this job */
211 unsigned int timeout;
212
213 /* Index and number of slots used in the push buffer */
214 unsigned int first_get;
215 unsigned int num_slots;
216
217 /* Copy of gathers */
218 size_t gather_copy_size;
219 dma_addr_t gather_copy;
220 u8 *gather_copy_mapped;
221
222 /* Check if register is marked as an address reg */
223 int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
224
225 /* Request a SETCLASS to this class */
226 u32 class;
227
228 /* Add a channel wait for previous ops to complete */
229 bool serialize;
230};
231
232struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
233 u32 num_cmdbufs, u32 num_relocs,
234 u32 num_waitchks);
235void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
236 u32 words, u32 offset);
237struct host1x_job *host1x_job_get(struct host1x_job *job);
238void host1x_job_put(struct host1x_job *job);
239int host1x_job_pin(struct host1x_job *job, struct device *dev);
240void host1x_job_unpin(struct host1x_job *job);
241
242/*
243 * subdevice probe infrastructure
244 */
245
246struct host1x_device;
247
248struct host1x_driver {
249 const struct of_device_id *subdevs;
250 struct list_head list;
251 const char *name;
252
253 int (*probe)(struct host1x_device *device);
254 int (*remove)(struct host1x_device *device);
255};
256
257int host1x_driver_register(struct host1x_driver *driver);
258void host1x_driver_unregister(struct host1x_driver *driver);
259
260struct host1x_device {
261 struct host1x_driver *driver;
262 struct list_head list;
263 struct device dev;
264
265 struct mutex subdevs_lock;
266 struct list_head subdevs;
267 struct list_head active;
268
269 struct mutex clients_lock;
270 struct list_head clients;
271};
272
273static inline struct host1x_device *to_host1x_device(struct device *dev)
274{
275 return container_of(dev, struct host1x_device, dev);
276}
277
278int host1x_device_init(struct host1x_device *device);
279int host1x_device_exit(struct host1x_device *device);
280
281int host1x_client_register(struct host1x_client *client);
282int host1x_client_unregister(struct host1x_client *client);
283
284#endif
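A minimal sketch of a client hooking into the interfaces declared in this new header; every example_* identifier is invented, and only host1x_client_register(), struct host1x_client_ops and HOST1X_CLASS_GR2D come from the header above.

#include <linux/errno.h>
#include <linux/host1x.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_gr2d {
        struct host1x_client client;
};

static int example_gr2d_init(struct host1x_client *client)
{
        /* typically request a channel and syncpoints here */
        return 0;
}

static int example_gr2d_exit(struct host1x_client *client)
{
        /* release whatever init acquired */
        return 0;
}

static const struct host1x_client_ops example_gr2d_client_ops = {
        .init = example_gr2d_init,
        .exit = example_gr2d_exit,
};

static int example_gr2d_probe(struct platform_device *pdev)
{
        struct example_gr2d *gr2d;

        gr2d = devm_kzalloc(&pdev->dev, sizeof(*gr2d), GFP_KERNEL);
        if (!gr2d)
                return -ENOMEM;

        gr2d->client.ops = &example_gr2d_client_ops;
        gr2d->client.dev = &pdev->dev;
        gr2d->client.class = HOST1X_CLASS_GR2D;

        /* makes the client visible to the host1x subdevice infrastructure */
        return host1x_client_register(&gr2d->client);
}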
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3935428c57cf..91672e2deec3 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -54,7 +54,8 @@ enum page_check_address_pmd_flag {
54extern pmd_t *page_check_address_pmd(struct page *page, 54extern pmd_t *page_check_address_pmd(struct page *page,
55 struct mm_struct *mm, 55 struct mm_struct *mm,
56 unsigned long address, 56 unsigned long address,
57 enum page_check_address_pmd_flag flag); 57 enum page_check_address_pmd_flag flag,
58 spinlock_t **ptl);
58 59
59#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) 60#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
60#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) 61#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
@@ -129,15 +130,15 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
129 unsigned long start, 130 unsigned long start,
130 unsigned long end, 131 unsigned long end,
131 long adjust_next); 132 long adjust_next);
132extern int __pmd_trans_huge_lock(pmd_t *pmd, 133extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
133 struct vm_area_struct *vma); 134 spinlock_t **ptl);
134/* mmap_sem must be held on entry */ 135/* mmap_sem must be held on entry */
135static inline int pmd_trans_huge_lock(pmd_t *pmd, 136static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
136 struct vm_area_struct *vma) 137 spinlock_t **ptl)
137{ 138{
138 VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); 139 VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
139 if (pmd_trans_huge(*pmd)) 140 if (pmd_trans_huge(*pmd))
140 return __pmd_trans_huge_lock(pmd, vma); 141 return __pmd_trans_huge_lock(pmd, vma, ptl);
141 else 142 else
142 return 0; 143 return 0;
143} 144}
@@ -215,8 +216,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
215 long adjust_next) 216 long adjust_next)
216{ 217{
217} 218}
218static inline int pmd_trans_huge_lock(pmd_t *pmd, 219static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
219 struct vm_area_struct *vma) 220 spinlock_t **ptl)
220{ 221{
221 return 0; 222 return 0;
222} 223}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0393270466c3..bd7e98752222 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
31void hugepage_put_subpool(struct hugepage_subpool *spool); 31void hugepage_put_subpool(struct hugepage_subpool *spool);
32 32
33int PageHuge(struct page *page); 33int PageHuge(struct page *page);
34int PageHeadHuge(struct page *page_head);
34 35
35void reset_vma_resv_huge_pages(struct vm_area_struct *vma); 36void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
36int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 37int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
69bool isolate_huge_page(struct page *page, struct list_head *list); 70bool isolate_huge_page(struct page *page, struct list_head *list);
70void putback_active_hugepage(struct page *page); 71void putback_active_hugepage(struct page *page);
71bool is_hugepage_active(struct page *page); 72bool is_hugepage_active(struct page *page);
72void copy_huge_page(struct page *dst, struct page *src);
73 73
74#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 74#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
75pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 75pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page)
104 return 0; 104 return 0;
105} 105}
106 106
107static inline int PageHeadHuge(struct page *page_head)
108{
109 return 0;
110}
111
107static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 112static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
108{ 113{
109} 114}
@@ -137,12 +142,12 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
137 return 0; 142 return 0;
138} 143}
139 144
140#define isolate_huge_page(p, l) false 145static inline bool isolate_huge_page(struct page *page, struct list_head *list)
141#define putback_active_hugepage(p) do {} while (0)
142#define is_hugepage_active(x) false
143static inline void copy_huge_page(struct page *dst, struct page *src)
144{ 146{
147 return false;
145} 148}
149#define putback_active_hugepage(p) do {} while (0)
150#define is_hugepage_active(x) false
146 151
147static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 152static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
148 unsigned long address, unsigned long end, pgprot_t newprot) 153 unsigned long address, unsigned long end, pgprot_t newprot)
@@ -392,6 +397,15 @@ static inline int hugepage_migration_support(struct hstate *h)
392 return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT); 397 return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
393} 398}
394 399
400static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
401 struct mm_struct *mm, pte_t *pte)
402{
403 if (huge_page_size(h) == PMD_SIZE)
404 return pmd_lockptr(mm, (pmd_t *) pte);
405 VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
406 return &mm->page_table_lock;
407}
408
395#else /* CONFIG_HUGETLB_PAGE */ 409#else /* CONFIG_HUGETLB_PAGE */
396struct hstate {}; 410struct hstate {};
397#define alloc_huge_page_node(h, nid) NULL 411#define alloc_huge_page_node(h, nid) NULL
@@ -401,6 +415,7 @@ struct hstate {};
401#define hstate_sizelog(s) NULL 415#define hstate_sizelog(s) NULL
402#define hstate_vma(v) NULL 416#define hstate_vma(v) NULL
403#define hstate_inode(i) NULL 417#define hstate_inode(i) NULL
418#define page_hstate(page) NULL
404#define huge_page_size(h) PAGE_SIZE 419#define huge_page_size(h) PAGE_SIZE
405#define huge_page_mask(h) PAGE_MASK 420#define huge_page_mask(h) PAGE_MASK
406#define vma_kernel_pagesize(v) PAGE_SIZE 421#define vma_kernel_pagesize(v) PAGE_SIZE
@@ -421,6 +436,22 @@ static inline pgoff_t basepage_index(struct page *page)
421#define dissolve_free_huge_pages(s, e) do {} while (0) 436#define dissolve_free_huge_pages(s, e) do {} while (0)
422#define pmd_huge_support() 0 437#define pmd_huge_support() 0
423#define hugepage_migration_support(h) 0 438#define hugepage_migration_support(h) 0
439
440static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
441 struct mm_struct *mm, pte_t *pte)
442{
443 return &mm->page_table_lock;
444}
424#endif /* CONFIG_HUGETLB_PAGE */ 445#endif /* CONFIG_HUGETLB_PAGE */
425 446
447static inline spinlock_t *huge_pte_lock(struct hstate *h,
448 struct mm_struct *mm, pte_t *pte)
449{
450 spinlock_t *ptl;
451
452 ptl = huge_pte_lockptr(h, mm, pte);
453 spin_lock(ptl);
454 return ptl;
455}
456
426#endif /* _LINUX_HUGETLB_H */ 457#endif /* _LINUX_HUGETLB_H */
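A sketch of how the new huge_pte_lock()/huge_pte_lockptr() helpers above would be used; example_touch_huge_pte() is invented and the hstate/mm/pte arguments are assumed to come from a normal hugetlb page-table walk.

#include <linux/hugetlb.h>
#include <linux/mm.h>

static void example_touch_huge_pte(struct hstate *h, struct mm_struct *mm,
                                   pte_t *ptep)
{
        /* picks the split-PMD lock or mm->page_table_lock as appropriate */
        spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

        /* ... ptep is stable here ... */

        spin_unlock(ptl);
}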
diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h
index f346e4d5381c..da0a680e2f6d 100644
--- a/include/linux/hwmon-vid.h
+++ b/include/linux/hwmon-vid.h
@@ -38,7 +38,7 @@ static inline int vid_to_reg(int val, u8 vrm)
38 return ((val >= 1100) && (val <= 1850) ? 38 return ((val >= 1100) && (val <= 1850) ?
39 ((18499 - val * 10) / 25 + 5) / 10 : -1); 39 ((18499 - val * 10) / 25 + 5) / 10 : -1);
40 default: 40 default:
41 return -1; 41 return -EINVAL;
42 } 42 }
43} 43}
44 44
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index b2514f70d591..09354f6c1d63 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -15,9 +15,19 @@
15#define _HWMON_H_ 15#define _HWMON_H_
16 16
17struct device; 17struct device;
18struct attribute_group;
18 19
19struct device *hwmon_device_register(struct device *dev); 20struct device *hwmon_device_register(struct device *dev);
21struct device *
22hwmon_device_register_with_groups(struct device *dev, const char *name,
23 void *drvdata,
24 const struct attribute_group **groups);
25struct device *
26devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
27 void *drvdata,
28 const struct attribute_group **groups);
20 29
21void hwmon_device_unregister(struct device *dev); 30void hwmon_device_unregister(struct device *dev);
31void devm_hwmon_device_unregister(struct device *dev);
22 32
23#endif 33#endif
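A minimal probe sketch for the new registration helpers above, assuming a hypothetical platform driver; the example_* names and the (empty) attribute list are placeholders.

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
        /* &sensor_dev_attr_temp1_input.dev_attr.attr, ... */
        NULL
};
ATTRIBUTE_GROUPS(example);

static int example_probe(struct platform_device *pdev)
{
        struct device *hwmon_dev;

        /* devm_ variant: no unregister call needed on the remove path */
        hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
                                                           "example", NULL,
                                                           example_groups);
        return PTR_ERR_OR_ZERO(hwmon_dev);
}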
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index d98503bde7e9..15da677478dd 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -432,15 +432,6 @@ struct hv_ring_buffer_info {
432 u32 ring_data_startoffset; 432 u32 ring_data_startoffset;
433}; 433};
434 434
435struct hv_ring_buffer_debug_info {
436 u32 current_interrupt_mask;
437 u32 current_read_index;
438 u32 current_write_index;
439 u32 bytes_avail_toread;
440 u32 bytes_avail_towrite;
441};
442
443
444/* 435/*
445 * 436 *
446 * hv_get_ringbuffer_availbytes() 437 * hv_get_ringbuffer_availbytes()
@@ -902,23 +893,6 @@ enum vmbus_channel_state {
902 CHANNEL_OPENED_STATE, 893 CHANNEL_OPENED_STATE,
903}; 894};
904 895
905struct vmbus_channel_debug_info {
906 u32 relid;
907 enum vmbus_channel_state state;
908 uuid_le interfacetype;
909 uuid_le interface_instance;
910 u32 monitorid;
911 u32 servermonitor_pending;
912 u32 servermonitor_latency;
913 u32 servermonitor_connectionid;
914 u32 clientmonitor_pending;
915 u32 clientmonitor_latency;
916 u32 clientmonitor_connectionid;
917
918 struct hv_ring_buffer_debug_info inbound;
919 struct hv_ring_buffer_debug_info outbound;
920};
921
922/* 896/*
923 * Represents each channel msg on the vmbus connection This is a 897 * Represents each channel msg on the vmbus connection This is a
924 * variable-size data structure depending on the msg type itself 898 * variable-size data structure depending on the msg type itself
@@ -1184,19 +1158,8 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1184 u64 *requestid); 1158 u64 *requestid);
1185 1159
1186 1160
1187extern void vmbus_get_debug_info(struct vmbus_channel *channel,
1188 struct vmbus_channel_debug_info *debug);
1189
1190extern void vmbus_ontimer(unsigned long data); 1161extern void vmbus_ontimer(unsigned long data);
1191 1162
1192struct hv_dev_port_info {
1193 u32 int_mask;
1194 u32 read_idx;
1195 u32 write_idx;
1196 u32 bytes_avail_toread;
1197 u32 bytes_avail_towrite;
1198};
1199
1200/* Base driver object */ 1163/* Base driver object */
1201struct hv_driver { 1164struct hv_driver {
1202 const char *name; 1165 const char *name;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2ab11dc38077..d9c8dbd3373f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -205,7 +205,6 @@ struct i2c_driver {
205 * @name: Indicates the type of the device, usually a chip name that's 205 * @name: Indicates the type of the device, usually a chip name that's
206 * generic enough to hide second-sourcing and compatible revisions. 206 * generic enough to hide second-sourcing and compatible revisions.
207 * @adapter: manages the bus segment hosting this I2C device 207 * @adapter: manages the bus segment hosting this I2C device
208 * @driver: device's driver, hence pointer to access routines
209 * @dev: Driver model device node for the slave. 208 * @dev: Driver model device node for the slave.
210 * @irq: indicates the IRQ generated by this device (if any) 209 * @irq: indicates the IRQ generated by this device (if any)
211 * @detected: member of an i2c_driver.clients list or i2c-core's 210 * @detected: member of an i2c_driver.clients list or i2c-core's
@@ -222,7 +221,6 @@ struct i2c_client {
222 /* _LOWER_ 7 bits */ 221 /* _LOWER_ 7 bits */
223 char name[I2C_NAME_SIZE]; 222 char name[I2C_NAME_SIZE];
224 struct i2c_adapter *adapter; /* the adapter we sit on */ 223 struct i2c_adapter *adapter; /* the adapter we sit on */
225 struct i2c_driver *driver; /* and our access routines */
226 struct device dev; /* the device structure */ 224 struct device dev; /* the device structure */
227 int irq; /* irq issued by device */ 225 int irq; /* irq issued by device */
228 struct list_head detected; 226 struct list_head detected;
@@ -447,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
447static inline struct i2c_adapter * 445static inline struct i2c_adapter *
448i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) 446i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
449{ 447{
450#if IS_ENABLED(I2C_MUX) 448#if IS_ENABLED(CONFIG_I2C_MUX)
451 struct device *parent = adapter->dev.parent; 449 struct device *parent = adapter->dev.parent;
452 450
453 if (parent != NULL && parent->type == &i2c_adapter_type) 451 if (parent != NULL && parent->type == &i2c_adapter_type)
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 81cbbdb96aae..673a3ce67f31 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -26,6 +26,7 @@
26#define __TWL_H_ 26#define __TWL_H_
27 27
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/phy/phy.h>
29#include <linux/input/matrix_keypad.h> 30#include <linux/input/matrix_keypad.h>
30 31
31/* 32/*
@@ -615,6 +616,7 @@ enum twl4030_usb_mode {
615struct twl4030_usb_data { 616struct twl4030_usb_data {
616 enum twl4030_usb_mode usb_mode; 617 enum twl4030_usb_mode usb_mode;
617 unsigned long features; 618 unsigned long features;
619 struct phy_init_data *init_data;
618 620
619 int (*phy_init)(struct device *dev); 621 int (*phy_init)(struct device *dev);
620 int (*phy_exit)(struct device *dev); 622 int (*phy_exit)(struct device *dev);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index b17974917dbf..46a14229a162 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1514,7 +1514,7 @@ static inline void ide_set_max_pio(ide_drive_t *drive)
1514 1514
1515char *ide_media_string(ide_drive_t *); 1515char *ide_media_string(ide_drive_t *);
1516 1516
1517extern struct device_attribute ide_dev_attrs[]; 1517extern const struct attribute_group *ide_dev_groups[];
1518extern struct bus_type ide_bus_type; 1518extern struct bus_type ide_bus_type;
1519extern struct class *ide_port_class; 1519extern struct class *ide_port_class;
1520 1520
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a5b598a79bec..8c3b26a21574 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -697,6 +697,18 @@ struct ieee80211_sec_chan_offs_ie {
697} __packed; 697} __packed;
698 698
699/** 699/**
700 * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
701 *
 702 * This structure represents the "Mesh Channel Switch Parameters element"
703 */
704struct ieee80211_mesh_chansw_params_ie {
705 u8 mesh_ttl;
706 u8 mesh_flags;
707 __le16 mesh_reason;
708 __le16 mesh_pre_value;
709} __packed;
710
711/**
700 * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE 712 * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
701 */ 713 */
702struct ieee80211_wide_bw_chansw_ie { 714struct ieee80211_wide_bw_chansw_ie {
@@ -751,6 +763,14 @@ enum mesh_config_capab_flags {
751}; 763};
752 764
753/** 765/**
766 * mesh channel switch parameters element's flag indicator
767 *
768 */
769#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
770#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
771#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
772
773/**
754 * struct ieee80211_rann_ie 774 * struct ieee80211_rann_ie
755 * 775 *
756 * This structure refers to "Root Announcement information element" 776 * This structure refers to "Root Announcement information element"
@@ -1391,8 +1411,8 @@ struct ieee80211_vht_operation {
1391#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 1411#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
1392#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 1412#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
1393#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 1413#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
1394#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000 1414#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX 0x0000e000
1395#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00030000 1415#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00070000
1396#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000 1416#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
1397#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000 1417#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
1398#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000 1418#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index ddd33fd5904d..84ba5ac39e03 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -61,6 +61,7 @@ struct macvlan_dev {
61 struct hlist_node hlist; 61 struct hlist_node hlist;
62 struct macvlan_port *port; 62 struct macvlan_port *port;
63 struct net_device *lowerdev; 63 struct net_device *lowerdev;
64 void *fwd_priv;
64 struct macvlan_pcpu_stats __percpu *pcpu_stats; 65 struct macvlan_pcpu_stats __percpu *pcpu_stats;
65 66
66 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); 67 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
@@ -118,4 +119,21 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops);
118extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 119extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
119 struct net_device *dev); 120 struct net_device *dev);
120 121
122#if IS_ENABLED(CONFIG_MACVLAN)
123static inline struct net_device *
124macvlan_dev_real_dev(const struct net_device *dev)
125{
126 struct macvlan_dev *macvlan = netdev_priv(dev);
127
128 return macvlan->lowerdev;
129}
130#else
131static inline struct net_device *
132macvlan_dev_real_dev(const struct net_device *dev)
133{
134 BUG();
135 return NULL;
136}
137#endif
138
121#endif /* _LINUX_IF_MACVLAN_H */ 139#endif /* _LINUX_IF_MACVLAN_H */
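A sketch of the new macvlan_dev_real_dev() accessor in use; example_lower_mtu() is invented, and the caller is assumed to already know that dev is a macvlan upper device, since the !CONFIG_MACVLAN stub BUG()s.

#include <linux/if_macvlan.h>
#include <linux/netdevice.h>

static int example_lower_mtu(const struct net_device *dev)
{
        /* dev must be a macvlan device; returns the lower device's MTU */
        return macvlan_dev_real_dev(dev)->mtu;
}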
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 715c343f7c00..f252deb99454 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -89,6 +89,101 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
89extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); 89extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
90extern u16 vlan_dev_vlan_id(const struct net_device *dev); 90extern u16 vlan_dev_vlan_id(const struct net_device *dev);
91 91
92/**
93 * struct vlan_priority_tci_mapping - vlan egress priority mappings
94 * @priority: skb priority
95 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
96 * @next: pointer to next struct
97 */
98struct vlan_priority_tci_mapping {
99 u32 priority;
100 u16 vlan_qos;
101 struct vlan_priority_tci_mapping *next;
102};
103
104/**
105 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
106 * @rx_packets: number of received packets
107 * @rx_bytes: number of received bytes
108 * @rx_multicast: number of received multicast packets
109 * @tx_packets: number of transmitted packets
110 * @tx_bytes: number of transmitted bytes
111 * @syncp: synchronization point for 64bit counters
112 * @rx_errors: number of rx errors
113 * @tx_dropped: number of tx drops
114 */
115struct vlan_pcpu_stats {
116 u64 rx_packets;
117 u64 rx_bytes;
118 u64 rx_multicast;
119 u64 tx_packets;
120 u64 tx_bytes;
121 struct u64_stats_sync syncp;
122 u32 rx_errors;
123 u32 tx_dropped;
124};
125
126struct proc_dir_entry;
127struct netpoll;
128
129/**
130 * struct vlan_dev_priv - VLAN private device data
131 * @nr_ingress_mappings: number of ingress priority mappings
132 * @ingress_priority_map: ingress priority mappings
133 * @nr_egress_mappings: number of egress priority mappings
134 * @egress_priority_map: hash of egress priority mappings
135 * @vlan_proto: VLAN encapsulation protocol
136 * @vlan_id: VLAN identifier
137 * @flags: device flags
138 * @real_dev: underlying netdevice
139 * @real_dev_addr: address of underlying netdevice
140 * @dent: proc dir entry
141 * @vlan_pcpu_stats: ptr to percpu rx stats
142 */
143struct vlan_dev_priv {
144 unsigned int nr_ingress_mappings;
145 u32 ingress_priority_map[8];
146 unsigned int nr_egress_mappings;
147 struct vlan_priority_tci_mapping *egress_priority_map[16];
148
149 __be16 vlan_proto;
150 u16 vlan_id;
151 u16 flags;
152
153 struct net_device *real_dev;
154 unsigned char real_dev_addr[ETH_ALEN];
155
156 struct proc_dir_entry *dent;
157 struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
158#ifdef CONFIG_NET_POLL_CONTROLLER
159 struct netpoll *netpoll;
160#endif
161};
162
163static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
164{
165 return netdev_priv(dev);
166}
167
168static inline u16
169vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
170{
171 struct vlan_priority_tci_mapping *mp;
172
173 smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
174
175 mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
176 while (mp) {
177 if (mp->priority == skprio) {
178 return mp->vlan_qos; /* This should already be shifted
179 * to mask correctly with the
180 * VLAN's TCI */
181 }
182 mp = mp->next;
183 }
184 return 0;
185}
186
92extern bool vlan_do_receive(struct sk_buff **skb); 187extern bool vlan_do_receive(struct sk_buff **skb);
93extern struct sk_buff *vlan_untag(struct sk_buff *skb); 188extern struct sk_buff *vlan_untag(struct sk_buff *skb);
94 189
@@ -121,6 +216,12 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
121 return 0; 216 return 0;
122} 217}
123 218
219static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
220 u32 skprio)
221{
222 return 0;
223}
224
124static inline bool vlan_do_receive(struct sk_buff **skb) 225static inline bool vlan_do_receive(struct sk_buff **skb)
125{ 226{
126 return false; 227 return false;
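A sketch showing why vlan_dev_vlan_id() and the newly exposed vlan_dev_get_egress_qos_mask() are typically used together: the QoS helper already returns the priority bits shifted into TCI position. example_build_vlan_tci() is invented and omits the CFI/DEI bit.

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static u16 example_build_vlan_tci(struct net_device *vlan_dev,
                                  struct sk_buff *skb)
{
        /* VID in the low 12 bits, PCP already shifted by the helper */
        return vlan_dev_vlan_id(vlan_dev) |
               vlan_dev_get_egress_qos_mask(vlan_dev, skb->priority);
}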
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 2bac0eb8948d..15607b45221a 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,6 +11,7 @@
11#define _IIO_BUFFER_GENERIC_H_ 11#define _IIO_BUFFER_GENERIC_H_
12#include <linux/sysfs.h> 12#include <linux/sysfs.h>
13#include <linux/iio/iio.h> 13#include <linux/iio/iio.h>
14#include <linux/kref.h>
14 15
15#ifdef CONFIG_IIO_BUFFER 16#ifdef CONFIG_IIO_BUFFER
16 17
@@ -26,6 +27,8 @@ struct iio_buffer;
26 * @set_bytes_per_datum:set number of bytes per datum 27 * @set_bytes_per_datum:set number of bytes per datum
27 * @get_length: get number of datums in buffer 28 * @get_length: get number of datums in buffer
28 * @set_length: set number of datums in buffer 29 * @set_length: set number of datums in buffer
30 * @release: called when the last reference to the buffer is dropped,
31 * should free all resources allocated by the buffer.
29 * 32 *
30 * The purpose of this structure is to make the buffer element 33 * The purpose of this structure is to make the buffer element
31 * modular as event for a given driver, different usecases may require 34 * modular as event for a given driver, different usecases may require
@@ -36,7 +39,7 @@ struct iio_buffer;
36 * any of them not existing. 39 * any of them not existing.
37 **/ 40 **/
38struct iio_buffer_access_funcs { 41struct iio_buffer_access_funcs {
39 int (*store_to)(struct iio_buffer *buffer, u8 *data); 42 int (*store_to)(struct iio_buffer *buffer, const void *data);
40 int (*read_first_n)(struct iio_buffer *buffer, 43 int (*read_first_n)(struct iio_buffer *buffer,
41 size_t n, 44 size_t n,
42 char __user *buf); 45 char __user *buf);
@@ -47,6 +50,8 @@ struct iio_buffer_access_funcs {
47 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); 50 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
48 int (*get_length)(struct iio_buffer *buffer); 51 int (*get_length)(struct iio_buffer *buffer);
49 int (*set_length)(struct iio_buffer *buffer, int length); 52 int (*set_length)(struct iio_buffer *buffer, int length);
53
54 void (*release)(struct iio_buffer *buffer);
50}; 55};
51 56
52/** 57/**
@@ -67,6 +72,7 @@ struct iio_buffer_access_funcs {
67 * @demux_list: [INTERN] list of operations required to demux the scan. 72 * @demux_list: [INTERN] list of operations required to demux the scan.
68 * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. 73 * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
69 * @buffer_list: [INTERN] entry in the devices list of current buffers. 74 * @buffer_list: [INTERN] entry in the devices list of current buffers.
75 * @ref: [INTERN] reference count of the buffer.
70 */ 76 */
71struct iio_buffer { 77struct iio_buffer {
72 int length; 78 int length;
@@ -81,8 +87,9 @@ struct iio_buffer {
81 bool stufftoread; 87 bool stufftoread;
82 const struct attribute_group *attrs; 88 const struct attribute_group *attrs;
83 struct list_head demux_list; 89 struct list_head demux_list;
84 unsigned char *demux_bounce; 90 void *demux_bounce;
85 struct list_head buffer_list; 91 struct list_head buffer_list;
92 struct kref ref;
86}; 93};
87 94
88/** 95/**
@@ -120,7 +127,32 @@ int iio_scan_mask_set(struct iio_dev *indio_dev,
120 * @indio_dev: iio_dev structure for device. 127 * @indio_dev: iio_dev structure for device.
121 * @data: Full scan. 128 * @data: Full scan.
122 */ 129 */
123int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data); 130int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
131
132/*
133 * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
134 * @indio_dev: iio_dev structure for device.
135 * @data: sample data
136 * @timestamp: timestamp for the sample data
137 *
138 * Pushes data to the IIO device's buffers. If timestamps are enabled for the
139 * device the function will store the supplied timestamp as the last element in
140 * the sample data buffer before pushing it to the device buffers. The sample
141 * data buffer needs to be large enough to hold the additional timestamp
142 * (usually the buffer should be indio->scan_bytes bytes large).
143 *
144 * Returns 0 on success, a negative error code otherwise.
145 */
146static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
147 void *data, int64_t timestamp)
148{
149 if (indio_dev->scan_timestamp) {
150 size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
151 ((int64_t *)data)[ts_offset] = timestamp;
152 }
153
154 return iio_push_to_buffers(indio_dev, data);
155}
124 156
125int iio_update_demux(struct iio_dev *indio_dev); 157int iio_update_demux(struct iio_dev *indio_dev);
126 158
@@ -174,11 +206,27 @@ ssize_t iio_buffer_show_enable(struct device *dev,
174 iio_buffer_show_enable, \ 206 iio_buffer_show_enable, \
175 iio_buffer_store_enable) 207 iio_buffer_store_enable)
176 208
177int iio_sw_buffer_preenable(struct iio_dev *indio_dev);
178
179bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, 209bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
180 const unsigned long *mask); 210 const unsigned long *mask);
181 211
212struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
213void iio_buffer_put(struct iio_buffer *buffer);
214
215/**
216 * iio_device_attach_buffer - Attach a buffer to a IIO device
217 * @indio_dev: The device the buffer should be attached to
218 * @buffer: The buffer to attach to the device
219 *
 220 * This function attaches a buffer to an IIO device. The buffer stays attached to
221 * the device until the device is freed. The function should only be called at
222 * most once per device.
223 */
224static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
225 struct iio_buffer *buffer)
226{
227 indio_dev->buffer = iio_buffer_get(buffer);
228}
229
182#else /* CONFIG_IIO_BUFFER */ 230#else /* CONFIG_IIO_BUFFER */
183 231
184static inline int iio_buffer_register(struct iio_dev *indio_dev, 232static inline int iio_buffer_register(struct iio_dev *indio_dev,
@@ -191,6 +239,9 @@ static inline int iio_buffer_register(struct iio_dev *indio_dev,
191static inline void iio_buffer_unregister(struct iio_dev *indio_dev) 239static inline void iio_buffer_unregister(struct iio_dev *indio_dev)
192{} 240{}
193 241
242static inline void iio_buffer_get(struct iio_buffer *buffer) {}
243static inline void iio_buffer_put(struct iio_buffer *buffer) {}
244
194#endif /* CONFIG_IIO_BUFFER */ 245#endif /* CONFIG_IIO_BUFFER */
195 246
196#endif /* _IIO_BUFFER_GENERIC_H_ */ 247#endif /* _IIO_BUFFER_GENERIC_H_ */
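A sketch of a triggered-buffer handler using iio_push_to_buffers_with_timestamp() from above; example_trigger_handler() and the four-element scan buffer are invented, and the buffer is assumed to be at least indio_dev->scan_bytes bytes with the timestamp slot 8-byte aligned.

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/irqreturn.h>

static irqreturn_t example_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        s64 scan[4] = { 0 };    /* sample data would be read into this */

        /* the helper stores the timestamp as the last s64 of the scan */
        iio_push_to_buffers_with_timestamp(indio_dev, scan, iio_get_time_ns());

        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
}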
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index e51f65480ea5..3c005eb3a0a4 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -16,6 +16,7 @@
16#include <linux/irqreturn.h> 16#include <linux/irqreturn.h>
17#include <linux/iio/trigger.h> 17#include <linux/iio/trigger.h>
18#include <linux/bitops.h> 18#include <linux/bitops.h>
19#include <linux/regulator/consumer.h>
19 20
20#include <linux/platform_data/st_sensors_pdata.h> 21#include <linux/platform_data/st_sensors_pdata.h>
21 22
@@ -184,6 +185,7 @@ struct st_sensors {
184 u8 wai; 185 u8 wai;
185 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; 186 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
186 struct iio_chan_spec *ch; 187 struct iio_chan_spec *ch;
188 int num_ch;
187 struct st_sensor_odr odr; 189 struct st_sensor_odr odr;
188 struct st_sensor_power pw; 190 struct st_sensor_power pw;
189 struct st_sensor_axis enable_axis; 191 struct st_sensor_axis enable_axis;
@@ -200,6 +202,8 @@ struct st_sensors {
200 * @trig: The trigger in use by the core driver. 202 * @trig: The trigger in use by the core driver.
201 * @sensor: Pointer to the current sensor struct in use. 203 * @sensor: Pointer to the current sensor struct in use.
202 * @current_fullscale: Maximum range of measure by the sensor. 204 * @current_fullscale: Maximum range of measure by the sensor.
205 * @vdd: Pointer to sensor's Vdd power supply
206 * @vdd_io: Pointer to sensor's Vdd-IO power supply
203 * @enabled: Status of the sensor (false->off, true->on). 207 * @enabled: Status of the sensor (false->off, true->on).
204 * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread. 208 * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread.
205 * @buffer_data: Data used by buffer part. 209 * @buffer_data: Data used by buffer part.
@@ -215,6 +219,8 @@ struct st_sensor_data {
215 struct iio_trigger *trig; 219 struct iio_trigger *trig;
216 struct st_sensors *sensor; 220 struct st_sensors *sensor;
217 struct st_sensor_fullscale_avl *current_fullscale; 221 struct st_sensor_fullscale_avl *current_fullscale;
222 struct regulator *vdd;
223 struct regulator *vdd_io;
218 224
219 bool enabled; 225 bool enabled;
220 bool multiread_bit; 226 bool multiread_bit;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 833926c91aa8..2752b1fd12be 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -77,7 +77,7 @@ struct iio_cb_buffer;
77 * fail. 77 * fail.
78 */ 78 */
79struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, 79struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
80 int (*cb)(u8 *data, 80 int (*cb)(const void *data,
81 void *private), 81 void *private),
82 void *private); 82 void *private);
83/** 83/**
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 13ce220c7003..5dab2c41031f 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -26,20 +26,6 @@ struct iio_event_data {
26 26
27#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int) 27#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
28 28
29enum iio_event_type {
30 IIO_EV_TYPE_THRESH,
31 IIO_EV_TYPE_MAG,
32 IIO_EV_TYPE_ROC,
33 IIO_EV_TYPE_THRESH_ADAPTIVE,
34 IIO_EV_TYPE_MAG_ADAPTIVE,
35};
36
37enum iio_event_direction {
38 IIO_EV_DIR_EITHER,
39 IIO_EV_DIR_RISING,
40 IIO_EV_DIR_FALLING,
41};
42
43/** 29/**
44 * IIO_EVENT_CODE() - create event identifier 30 * IIO_EVENT_CODE() - create event identifier
45 * @chan_type: Type of the channel. Should be one of enum iio_chan_type. 31 * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 2103cc32a5fb..256a90a1bea6 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -36,6 +36,14 @@ enum iio_chan_info_enum {
36 IIO_CHAN_INFO_PHASE, 36 IIO_CHAN_INFO_PHASE,
37 IIO_CHAN_INFO_HARDWAREGAIN, 37 IIO_CHAN_INFO_HARDWAREGAIN,
38 IIO_CHAN_INFO_HYSTERESIS, 38 IIO_CHAN_INFO_HYSTERESIS,
39 IIO_CHAN_INFO_INT_TIME,
40};
41
42enum iio_shared_by {
43 IIO_SEPARATE,
44 IIO_SHARED_BY_TYPE,
45 IIO_SHARED_BY_DIR,
46 IIO_SHARED_BY_ALL
39}; 47};
40 48
41enum iio_endian { 49enum iio_endian {
@@ -57,7 +65,7 @@ struct iio_dev;
57 */ 65 */
58struct iio_chan_spec_ext_info { 66struct iio_chan_spec_ext_info {
59 const char *name; 67 const char *name;
60 bool shared; 68 enum iio_shared_by shared;
61 ssize_t (*read)(struct iio_dev *, uintptr_t private, 69 ssize_t (*read)(struct iio_dev *, uintptr_t private,
62 struct iio_chan_spec const *, char *buf); 70 struct iio_chan_spec const *, char *buf);
63 ssize_t (*write)(struct iio_dev *, uintptr_t private, 71 ssize_t (*write)(struct iio_dev *, uintptr_t private,
@@ -125,12 +133,35 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
125#define IIO_ENUM_AVAILABLE(_name, _e) \ 133#define IIO_ENUM_AVAILABLE(_name, _e) \
126{ \ 134{ \
127 .name = (_name "_available"), \ 135 .name = (_name "_available"), \
128 .shared = true, \ 136 .shared = IIO_SHARED_BY_TYPE, \
129 .read = iio_enum_available_read, \ 137 .read = iio_enum_available_read, \
130 .private = (uintptr_t)(_e), \ 138 .private = (uintptr_t)(_e), \
131} 139}
132 140
133/** 141/**
142 * struct iio_event_spec - specification for a channel event
143 * @type: Type of the event
144 * @dir: Direction of the event
145 * @mask_separate: Bit mask of enum iio_event_info values. Attributes
146 * set in this mask will be registered per channel.
147 * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes
148 * set in this mask will be shared by channel type.
149 * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes
150 * set in this mask will be shared by channel type and
151 * direction.
152 * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes
153 * set in this mask will be shared by all channels.
154 */
155struct iio_event_spec {
156 enum iio_event_type type;
157 enum iio_event_direction dir;
158 unsigned long mask_separate;
159 unsigned long mask_shared_by_type;
160 unsigned long mask_shared_by_dir;
161 unsigned long mask_shared_by_all;
162};
163
164/**
134 * struct iio_chan_spec - specification of a single channel 165 * struct iio_chan_spec - specification of a single channel
135 * @type: What type of measurement is the channel making. 166 * @type: What type of measurement is the channel making.
136 * @channel: What number do we wish to assign the channel. 167 * @channel: What number do we wish to assign the channel.
@@ -146,13 +177,18 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
146 * shift: Shift right by this before masking out 177 * shift: Shift right by this before masking out
147 * realbits. 178 * realbits.
148 * endianness: little or big endian 179 * endianness: little or big endian
149 * @info_mask: What information is to be exported about this channel.
150 * This includes calibbias, scale etc.
151 * @info_mask_separate: What information is to be exported that is specific to 180 * @info_mask_separate: What information is to be exported that is specific to
152 * this channel. 181 * this channel.
153 * @info_mask_shared_by_type: What information is to be exported that is shared 182 * @info_mask_shared_by_type: What information is to be exported that is shared
154* by all channels of the same type. 183 * by all channels of the same type.
184 * @info_mask_shared_by_dir: What information is to be exported that is shared
185 * by all channels of the same direction.
186 * @info_mask_shared_by_all: What information is to be exported that is shared
187 * by all channels.
155 * @event_mask: What events can this channel produce. 188 * @event_mask: What events can this channel produce.
189 * @event_spec: Array of events which should be registered for this
190 * channel.
191 * @num_event_specs: Size of the event_spec array.
156 * @ext_info: Array of extended info attributes for this channel. 192 * @ext_info: Array of extended info attributes for this channel.
157 * The array is NULL terminated, the last element should 193 * The array is NULL terminated, the last element should
158 * have its name field set to NULL. 194 * have its name field set to NULL.
@@ -186,10 +222,13 @@ struct iio_chan_spec {
186 u8 shift; 222 u8 shift;
187 enum iio_endian endianness; 223 enum iio_endian endianness;
188 } scan_type; 224 } scan_type;
189 long info_mask;
190 long info_mask_separate; 225 long info_mask_separate;
191 long info_mask_shared_by_type; 226 long info_mask_shared_by_type;
227 long info_mask_shared_by_dir;
228 long info_mask_shared_by_all;
192 long event_mask; 229 long event_mask;
230 const struct iio_event_spec *event_spec;
231 unsigned int num_event_specs;
193 const struct iio_chan_spec_ext_info *ext_info; 232 const struct iio_chan_spec_ext_info *ext_info;
194 const char *extend_name; 233 const char *extend_name;
195 const char *datasheet_name; 234 const char *datasheet_name;
@@ -212,7 +251,9 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
212 enum iio_chan_info_enum type) 251 enum iio_chan_info_enum type)
213{ 252{
214 return (chan->info_mask_separate & BIT(type)) | 253 return (chan->info_mask_separate & BIT(type)) |
215 (chan->info_mask_shared_by_type & BIT(type)); 254 (chan->info_mask_shared_by_type & BIT(type)) |
255 (chan->info_mask_shared_by_dir & BIT(type)) |
256 (chan->info_mask_shared_by_all & BIT(type));
216} 257}
217 258
218#define IIO_ST(si, rb, sb, sh) \ 259#define IIO_ST(si, rb, sb, sh) \
@@ -270,6 +311,12 @@ struct iio_dev;
270 * is event dependant. event_code specifies which event. 311 * is event dependant. event_code specifies which event.
271 * @write_event_value: write the value associated with the event. 312 * @write_event_value: write the value associated with the event.
272 * Meaning is event dependent. 313 * Meaning is event dependent.
314 * @read_event_config_new: find out if the event is enabled. New style interface.
315 * @write_event_config_new: set if the event is enabled. New style interface.
316 * @read_event_value_new: read a configuration value associated with the event.
317 * New style interface.
318 * @write_event_value_new: write a configuration value for the event. New style
319 * interface.
273 * @validate_trigger: function to validate the trigger when the 320 * @validate_trigger: function to validate the trigger when the
274 * current trigger gets changed. 321 * current trigger gets changed.
275 * @update_scan_mode: function to configure device and scan buffer when 322 * @update_scan_mode: function to configure device and scan buffer when
@@ -310,6 +357,30 @@ struct iio_info {
310 int (*write_event_value)(struct iio_dev *indio_dev, 357 int (*write_event_value)(struct iio_dev *indio_dev,
311 u64 event_code, 358 u64 event_code,
312 int val); 359 int val);
360
361 int (*read_event_config_new)(struct iio_dev *indio_dev,
362 const struct iio_chan_spec *chan,
363 enum iio_event_type type,
364 enum iio_event_direction dir);
365
366 int (*write_event_config_new)(struct iio_dev *indio_dev,
367 const struct iio_chan_spec *chan,
368 enum iio_event_type type,
369 enum iio_event_direction dir,
370 int state);
371
372 int (*read_event_value_new)(struct iio_dev *indio_dev,
373 const struct iio_chan_spec *chan,
374 enum iio_event_type type,
375 enum iio_event_direction dir,
376 enum iio_event_info info, int *val, int *val2);
377
378 int (*write_event_value_new)(struct iio_dev *indio_dev,
379 const struct iio_chan_spec *chan,
380 enum iio_event_type type,
381 enum iio_event_direction dir,
382 enum iio_event_info info, int val, int val2);
383
313 int (*validate_trigger)(struct iio_dev *indio_dev, 384 int (*validate_trigger)(struct iio_dev *indio_dev,
314 struct iio_trigger *trig); 385 struct iio_trigger *trig);
315 int (*update_scan_mode)(struct iio_dev *indio_dev, 386 int (*update_scan_mode)(struct iio_dev *indio_dev,
@@ -457,7 +528,7 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
457{ 528{
458 if (indio_dev) 529 if (indio_dev)
459 put_device(&indio_dev->dev); 530 put_device(&indio_dev->dev);
460}; 531}
461 532
462/** 533/**
463 * dev_to_iio_dev() - Get IIO device struct from a device struct 534 * dev_to_iio_dev() - Get IIO device struct from a device struct
@@ -593,7 +664,7 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
593{ 664{
594 return indio_dev->currentmode 665 return indio_dev->currentmode
595 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE); 666 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
596}; 667}
597 668
598/** 669/**
599 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry 670 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
@@ -603,12 +674,12 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
603static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) 674static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
604{ 675{
605 return indio_dev->debugfs_dentry; 676 return indio_dev->debugfs_dentry;
606}; 677}
607#else 678#else
608static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) 679static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
609{ 680{
610 return NULL; 681 return NULL;
611}; 682}
612#endif 683#endif
613 684
614int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, 685int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
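A sketch of a channel described with the new iio_event_spec style from above, replacing the old event_mask; the voltage channel and its rising-threshold event are invented for illustration, and a real driver would also wire up the *_event_*_new callbacks in its iio_info.

#include <linux/bitops.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/kernel.h>

static const struct iio_event_spec example_events[] = {
        {
                .type = IIO_EV_TYPE_THRESH,
                .dir = IIO_EV_DIR_RISING,
                .mask_separate = BIT(IIO_EV_INFO_VALUE) |
                                 BIT(IIO_EV_INFO_ENABLE),
        },
};

static const struct iio_chan_spec example_channels[] = {
        {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 0,
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .event_spec = example_events,
                .num_event_specs = ARRAY_SIZE(example_events),
        },
};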
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 2958c960003a..8a1d18640ab9 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -100,6 +100,21 @@ struct iio_const_attr {
100#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \ 100#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \
101 IIO_CONST_ATTR(sampling_frequency_available, _string) 101 IIO_CONST_ATTR(sampling_frequency_available, _string)
102 102
103/**
104 * IIO_DEV_ATTR_INT_TIME_AVAIL - list available integration times
105 * @_show: output method for the attribute
106 **/
107#define IIO_DEV_ATTR_INT_TIME_AVAIL(_show) \
108 IIO_DEVICE_ATTR(integration_time_available, S_IRUGO, _show, NULL, 0)
109/**
110 * IIO_CONST_ATTR_INT_TIME_AVAIL - list available integration times
111 * @_string: frequency string for the attribute
112 *
113 * Constant version
114 **/
115#define IIO_CONST_ATTR_INT_TIME_AVAIL(_string) \
116 IIO_CONST_ATTR(integration_time_available, _string)
117
103#define IIO_DEV_ATTR_TEMP_RAW(_show) \ 118#define IIO_DEV_ATTR_TEMP_RAW(_show) \
104 IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0) 119 IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0)
105 120
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 88bf0f0d27b4..4ac928ee31c5 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -54,6 +54,26 @@ enum iio_modifier {
54 IIO_MOD_LIGHT_BLUE, 54 IIO_MOD_LIGHT_BLUE,
55}; 55};
56 56
57enum iio_event_type {
58 IIO_EV_TYPE_THRESH,
59 IIO_EV_TYPE_MAG,
60 IIO_EV_TYPE_ROC,
61 IIO_EV_TYPE_THRESH_ADAPTIVE,
62 IIO_EV_TYPE_MAG_ADAPTIVE,
63};
64
65enum iio_event_info {
66 IIO_EV_INFO_ENABLE,
67 IIO_EV_INFO_VALUE,
68 IIO_EV_INFO_HYSTERESIS,
69};
70
71enum iio_event_direction {
72 IIO_EV_DIR_EITHER,
73 IIO_EV_DIR_RISING,
74 IIO_EV_DIR_FALLING,
75};
76
57#define IIO_VAL_INT 1 77#define IIO_VAL_INT 1
58#define IIO_VAL_INT_PLUS_MICRO 2 78#define IIO_VAL_INT_PLUS_MICRO 2
59#define IIO_VAL_INT_PLUS_NANO 3 79#define IIO_VAL_INT_PLUS_NANO 3
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 79640e015a86..0d678aefe69d 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -147,25 +147,27 @@ struct in_ifaddr {
147 unsigned long ifa_tstamp; /* updated timestamp */ 147 unsigned long ifa_tstamp; /* updated timestamp */
148}; 148};
149 149
150extern int register_inetaddr_notifier(struct notifier_block *nb); 150int register_inetaddr_notifier(struct notifier_block *nb);
151extern int unregister_inetaddr_notifier(struct notifier_block *nb); 151int unregister_inetaddr_notifier(struct notifier_block *nb);
152 152
153extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, 153void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
154 struct ipv4_devconf *devconf); 154 struct ipv4_devconf *devconf);
155 155
156extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); 156struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
157static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) 157static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
158{ 158{
159 return __ip_dev_find(net, addr, true); 159 return __ip_dev_find(net, addr, true);
160} 160}
161 161
162extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); 162int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
163extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); 163int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
164extern void devinet_init(void); 164void devinet_init(void);
165extern struct in_device *inetdev_by_index(struct net *, int); 165struct in_device *inetdev_by_index(struct net *, int);
166extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); 166__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
167extern __be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope); 167__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
168extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); 168 int scope);
169struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
170 __be32 mask);
169 171
170static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) 172static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
171{ 173{
@@ -218,7 +220,7 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
218 return rtnl_dereference(dev->ip_ptr); 220 return rtnl_dereference(dev->ip_ptr);
219} 221}
220 222
221extern void in_dev_finish_destroy(struct in_device *idev); 223void in_dev_finish_destroy(struct in_device *idev);
222 224
223static inline void in_dev_put(struct in_device *idev) 225static inline void in_dev_put(struct in_device *idev)
224{ 226{
diff --git a/include/linux/init.h b/include/linux/init.h
index f1c27a71d03c..8e68a64bfe00 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -26,8 +26,8 @@
26 * extern int initialize_foobar_device(int, int, int) __init; 26 * extern int initialize_foobar_device(int, int, int) __init;
27 * 27 *
28 * For initialized data: 28 * For initialized data:
29 * You should insert __initdata between the variable name and equal 29 * You should insert __initdata or __initconst between the variable name
30 * sign followed by value, e.g.: 30 * and equal sign followed by value, e.g.:
31 * 31 *
32 * static int init_variable __initdata = 0; 32 * static int init_variable __initdata = 0;
33 * static const char linux_logo[] __initconst = { 0x32, 0x36, ... }; 33 * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
@@ -35,8 +35,6 @@
35 * Don't forget to initialize data not at file scope, i.e. within a function, 35 * Don't forget to initialize data not at file scope, i.e. within a function,
36 * as gcc otherwise puts the data into the bss section and not into the init 36 * as gcc otherwise puts the data into the bss section and not into the init
37 * section. 37 * section.
38 *
39 * Also note, that this data cannot be "const".
40 */ 38 */
41 39
42/* These are for everybody (although not all archs will actually 40/* These are for everybody (although not all archs will actually
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a143df5ee548..8bde15387c7c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,10 +32,10 @@ extern struct fs_struct init_fs;
32#endif 32#endif
33 33
34#ifdef CONFIG_CPUSETS 34#ifdef CONFIG_CPUSETS
35#define INIT_CPUSET_SEQ \ 35#define INIT_CPUSET_SEQ(tsk) \
36 .mems_allowed_seq = SEQCNT_ZERO, 36 .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
37#else 37#else
38#define INIT_CPUSET_SEQ 38#define INIT_CPUSET_SEQ(tsk)
39#endif 39#endif
40 40
41#define INIT_SIGNALS(sig) { \ 41#define INIT_SIGNALS(sig) { \
@@ -220,7 +220,7 @@ extern struct task_group root_task_group;
220 INIT_FTRACE_GRAPH \ 220 INIT_FTRACE_GRAPH \
221 INIT_TRACE_RECURSION \ 221 INIT_TRACE_RECURSION \
222 INIT_TASK_RCU_PREEMPT(tsk) \ 222 INIT_TASK_RCU_PREEMPT(tsk) \
223 INIT_CPUSET_SEQ \ 223 INIT_CPUSET_SEQ(tsk) \
224 INIT_VTIME(tsk) \ 224 INIT_VTIME(tsk) \
225} 225}
226 226
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5e865b554940..db43b58a3355 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,14 +11,13 @@
11#include <linux/irqnr.h> 11#include <linux/irqnr.h>
12#include <linux/hardirq.h> 12#include <linux/hardirq.h>
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14#include <linux/smp.h>
15#include <linux/percpu.h>
16#include <linux/hrtimer.h> 14#include <linux/hrtimer.h>
17#include <linux/kref.h> 15#include <linux/kref.h>
18#include <linux/workqueue.h> 16#include <linux/workqueue.h>
19 17
20#include <linux/atomic.h> 18#include <linux/atomic.h>
21#include <asm/ptrace.h> 19#include <asm/ptrace.h>
20#include <asm/irq.h>
22 21
23/* 22/*
24 * These correspond to the IORESOURCE_IRQ_* defines in 23 * These correspond to the IORESOURCE_IRQ_* defines in
@@ -374,6 +373,16 @@ struct softirq_action
374 373
375asmlinkage void do_softirq(void); 374asmlinkage void do_softirq(void);
376asmlinkage void __do_softirq(void); 375asmlinkage void __do_softirq(void);
376
377#ifdef __ARCH_HAS_DO_SOFTIRQ
378void do_softirq_own_stack(void);
379#else
380static inline void do_softirq_own_stack(void)
381{
382 __do_softirq();
383}
384#endif
385
377extern void open_softirq(int nr, void (*action)(struct softirq_action *)); 386extern void open_softirq(int nr, void (*action)(struct softirq_action *));
378extern void softirq_init(void); 387extern void softirq_init(void);
379extern void __raise_softirq_irqoff(unsigned int nr); 388extern void __raise_softirq_irqoff(unsigned int nr);
@@ -381,15 +390,6 @@ extern void __raise_softirq_irqoff(unsigned int nr);
381extern void raise_softirq_irqoff(unsigned int nr); 390extern void raise_softirq_irqoff(unsigned int nr);
382extern void raise_softirq(unsigned int nr); 391extern void raise_softirq(unsigned int nr);
383 392
384/* This is the worklist that queues up per-cpu softirq work.
385 *
386 * send_remote_sendirq() adds work to these lists, and
387 * the softirq handler itself dequeues from them. The queues
388 * are protected by disabling local cpu interrupts and they must
389 * only be accessed by the local cpu that they are for.
390 */
391DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
392
393DECLARE_PER_CPU(struct task_struct *, ksoftirqd); 393DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
394 394
395static inline struct task_struct *this_cpu_ksoftirqd(void) 395static inline struct task_struct *this_cpu_ksoftirqd(void)
@@ -397,17 +397,6 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
397 return this_cpu_read(ksoftirqd); 397 return this_cpu_read(ksoftirqd);
398} 398}
399 399
400/* Try to send a softirq to a remote cpu. If this cannot be done, the
401 * work will be queued to the local cpu.
402 */
403extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
404
405/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
406 * and compute the current cpu, passed in as 'this_cpu'.
407 */
408extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
409 int this_cpu, int softirq);
410
411/* Tasklets --- multithreaded analogue of BHs. 400/* Tasklets --- multithreaded analogue of BHs.
412 401
413 Main feature differing them of generic softirqs: tasklet 402 Main feature differing them of generic softirqs: tasklet
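The interrupt.h hunk above adds do_softirq_own_stack() with a configuration-driven fallback: architectures that define __ARCH_HAS_DO_SOFTIRQ supply their own stack-switching version, everyone else gets a static inline that simply calls __do_softirq(). Below is a minimal, userspace-only sketch of that override pattern; the ARCH_HAS_FAST_PATH macro and both function names are invented for the illustration and are not kernel APIs.

#include <stdio.h>

/* Architecture opt-in switch; define it to supply a specialised version. */
/* #define ARCH_HAS_FAST_PATH */

static void generic_work(void)
{
	puts("generic implementation");
}

#ifdef ARCH_HAS_FAST_PATH
void arch_work(void);			/* provided elsewhere by the "arch" code */
#else
static inline void arch_work(void)
{
	generic_work();			/* fallback: no special implementation */
}
#endif

int main(void)
{
	/* Callers always use the same entry point, regardless of the config. */
	arch_work();
	return 0;
}

The design choice mirrored here is that callers never see the #ifdef: they call one name, and the header decides at build time whether that name is an arch-provided function or the generic inline.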
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7ea319e95b47..a444c790fa72 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -22,6 +22,7 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <trace/events/iommu.h>
25 26
26#define IOMMU_READ (1) 27#define IOMMU_READ (1)
27#define IOMMU_WRITE (2) 28#define IOMMU_WRITE (2)
@@ -227,6 +228,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
227 ret = domain->handler(domain, dev, iova, flags, 228 ret = domain->handler(domain, dev, iova, flags,
228 domain->handler_token); 229 domain->handler_token);
229 230
231 trace_io_page_fault(dev, iova, flags);
230 return ret; 232 return ret;
231} 233}
232 234
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 28ea38439313..c56c350324e4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -4,6 +4,7 @@
4#include <uapi/linux/ipv6.h> 4#include <uapi/linux/ipv6.h>
5 5
6#define ipv6_optlen(p) (((p)->hdrlen+1) << 3) 6#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
7#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
7/* 8/*
8 * This structure contains configuration options per IPv6 link. 9 * This structure contains configuration options per IPv6 link.
9 */ 10 */
@@ -21,13 +22,11 @@ struct ipv6_devconf {
21 __s32 force_mld_version; 22 __s32 force_mld_version;
22 __s32 mldv1_unsolicited_report_interval; 23 __s32 mldv1_unsolicited_report_interval;
23 __s32 mldv2_unsolicited_report_interval; 24 __s32 mldv2_unsolicited_report_interval;
24#ifdef CONFIG_IPV6_PRIVACY
25 __s32 use_tempaddr; 25 __s32 use_tempaddr;
26 __s32 temp_valid_lft; 26 __s32 temp_valid_lft;
27 __s32 temp_prefered_lft; 27 __s32 temp_prefered_lft;
28 __s32 regen_max_retry; 28 __s32 regen_max_retry;
29 __s32 max_desync_factor; 29 __s32 max_desync_factor;
30#endif
31 __s32 max_addresses; 30 __s32 max_addresses;
32 __s32 accept_ra_defrtr; 31 __s32 accept_ra_defrtr;
33 __s32 accept_ra_pinfo; 32 __s32 accept_ra_pinfo;
@@ -115,16 +114,8 @@ static inline int inet6_iif(const struct sk_buff *skb)
115 return IP6CB(skb)->iif; 114 return IP6CB(skb)->iif;
116} 115}
117 116
118struct inet6_request_sock {
119 struct in6_addr loc_addr;
120 struct in6_addr rmt_addr;
121 struct sk_buff *pktopts;
122 int iif;
123};
124
125struct tcp6_request_sock { 117struct tcp6_request_sock {
126 struct tcp_request_sock tcp6rsk_tcp; 118 struct tcp_request_sock tcp6rsk_tcp;
127 struct inet6_request_sock tcp6rsk_inet6;
128}; 119};
129 120
130struct ipv6_mc_socklist; 121struct ipv6_mc_socklist;
@@ -141,8 +132,6 @@ struct ipv6_fl_socklist;
141 */ 132 */
142struct ipv6_pinfo { 133struct ipv6_pinfo {
143 struct in6_addr saddr; 134 struct in6_addr saddr;
144 struct in6_addr rcv_saddr;
145 struct in6_addr daddr;
146 struct in6_pktinfo sticky_pktinfo; 135 struct in6_pktinfo sticky_pktinfo;
147 const struct in6_addr *daddr_cache; 136 const struct in6_addr *daddr_cache;
148#ifdef CONFIG_IPV6_SUBTREES 137#ifdef CONFIG_IPV6_SUBTREES
@@ -256,48 +245,22 @@ struct tcp6_sock {
256 245
257extern int inet6_sk_rebuild_header(struct sock *sk); 246extern int inet6_sk_rebuild_header(struct sock *sk);
258 247
259struct inet6_timewait_sock {
260 struct in6_addr tw_v6_daddr;
261 struct in6_addr tw_v6_rcv_saddr;
262};
263
264struct tcp6_timewait_sock { 248struct tcp6_timewait_sock {
265 struct tcp_timewait_sock tcp6tw_tcp; 249 struct tcp_timewait_sock tcp6tw_tcp;
266 struct inet6_timewait_sock tcp6tw_inet6;
267}; 250};
268 251
269static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
270{
271 return (struct inet6_timewait_sock *)(((u8 *)sk) +
272 inet_twsk(sk)->tw_ipv6_offset);
273}
274
275#if IS_ENABLED(CONFIG_IPV6) 252#if IS_ENABLED(CONFIG_IPV6)
276static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) 253static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
277{ 254{
278 return inet_sk(__sk)->pinet6; 255 return inet_sk(__sk)->pinet6;
279} 256}
280 257
281static inline struct inet6_request_sock *
282 inet6_rsk(const struct request_sock *rsk)
283{
284 return (struct inet6_request_sock *)(((u8 *)rsk) +
285 inet_rsk(rsk)->inet6_rsk_offset);
286}
287
288static inline u32 inet6_rsk_offset(struct request_sock *rsk)
289{
290 return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock);
291}
292
293static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops) 258static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
294{ 259{
295 struct request_sock *req = reqsk_alloc(ops); 260 struct request_sock *req = reqsk_alloc(ops);
296 261
297 if (req != NULL) { 262 if (req)
298 inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req); 263 inet_rsk(req)->pktopts = NULL;
299 inet6_rsk(req)->pktopts = NULL;
300 }
301 264
302 return req; 265 return req;
303} 266}
@@ -321,21 +284,11 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
321#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only) 284#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
322#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk)) 285#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
323 286
324static inline u16 inet6_tw_offset(const struct proto *prot) 287static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
325{ 288{
326 return prot->twsk_prot->twsk_obj_size - 289 if (sk->sk_family == AF_INET6)
327 sizeof(struct inet6_timewait_sock); 290 return &sk->sk_v6_rcv_saddr;
328} 291 return NULL;
329
330static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
331{
332 return likely(sk->sk_state != TCP_TIME_WAIT) ?
333 &inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr;
334}
335
336static inline struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
337{
338 return sk->sk_family == AF_INET6 ? __inet6_rcv_saddr(sk) : NULL;
339} 292}
340 293
341static inline int inet_v6_ipv6only(const struct sock *sk) 294static inline int inet_v6_ipv6only(const struct sock *sk)
@@ -363,28 +316,18 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
363 return NULL; 316 return NULL;
364} 317}
365 318
366#define __inet6_rcv_saddr(__sk) NULL
367#define inet6_rcv_saddr(__sk) NULL 319#define inet6_rcv_saddr(__sk) NULL
368#define tcp_twsk_ipv6only(__sk) 0 320#define tcp_twsk_ipv6only(__sk) 0
369#define inet_v6_ipv6only(__sk) 0 321#define inet_v6_ipv6only(__sk) 0
370#endif /* IS_ENABLED(CONFIG_IPV6) */ 322#endif /* IS_ENABLED(CONFIG_IPV6) */
371 323
372#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ 324#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
373 ((inet_sk(__sk)->inet_portpair == (__ports)) && \ 325 (((__sk)->sk_portpair == (__ports)) && \
374 ((__sk)->sk_family == AF_INET6) && \ 326 ((__sk)->sk_family == AF_INET6) && \
375 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ 327 ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
376 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ 328 ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
377 (!(__sk)->sk_bound_dev_if || \ 329 (!(__sk)->sk_bound_dev_if || \
378 ((__sk)->sk_bound_dev_if == (__dif))) && \ 330 ((__sk)->sk_bound_dev_if == (__dif))) && \
379 net_eq(sock_net(__sk), (__net))) 331 net_eq(sock_net(__sk), (__net)))
380 332
381#define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
382 ((inet_twsk(__sk)->tw_portpair == (__ports)) && \
383 ((__sk)->sk_family == AF_INET6) && \
384 ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr)) && \
385 ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \
386 (!(__sk)->sk_bound_dev_if || \
387 ((__sk)->sk_bound_dev_if == (__dif))) && \
388 net_eq(sock_net(__sk), (__net)))
389
390#endif /* _IPV6_H */ 333#endif /* _IPV6_H */
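With the IPv6 addresses moved into struct sock itself (sk_v6_daddr, sk_v6_rcv_saddr), the ipv6.h hunk collapses __inet6_rcv_saddr()/inet6_rcv_saddr() into one family-checked accessor. A hedged sketch of that accessor pattern follows; my_sock, my_in6_addr and rcv_saddr6() are stand-ins for the demo, not kernel types.

#include <stdio.h>
#include <string.h>

#define MY_AF_INET	2
#define MY_AF_INET6	10

struct my_in6_addr { unsigned char s6_addr[16]; };

/* Stand-in for struct sock after the consolidation: the IPv6 receive
 * address is a plain member, valid whenever family == AF_INET6. */
struct my_sock {
	int family;
	struct my_in6_addr v6_rcv_saddr;
};

/* One accessor for every socket state: non-IPv6 sockets simply get NULL. */
static const struct my_in6_addr *rcv_saddr6(const struct my_sock *sk)
{
	if (sk->family == MY_AF_INET6)
		return &sk->v6_rcv_saddr;
	return NULL;
}

int main(void)
{
	struct my_sock s6 = { .family = MY_AF_INET6 };
	struct my_sock s4 = { .family = MY_AF_INET };

	memset(s6.v6_rcv_saddr.s6_addr, 0, sizeof(s6.v6_rcv_saddr.s6_addr));
	s6.v6_rcv_saddr.s6_addr[15] = 1;	/* ::1 */

	printf("v6 socket addr %s\n", rcv_saddr6(&s6) ? "present" : "absent");
	printf("v4 socket addr %s\n", rcv_saddr6(&s4) ? "present" : "absent");
	return 0;
}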
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 56bb0dc8b7d4..7dc10036eff5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -70,6 +70,9 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
70 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context 70 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
71 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread 71 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
72 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable 72 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
73 * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
74 * it from the spurious interrupt detection
75 * mechanism and from core side polling.
73 */ 76 */
74enum { 77enum {
75 IRQ_TYPE_NONE = 0x00000000, 78 IRQ_TYPE_NONE = 0x00000000,
@@ -94,12 +97,14 @@ enum {
94 IRQ_NESTED_THREAD = (1 << 15), 97 IRQ_NESTED_THREAD = (1 << 15),
95 IRQ_NOTHREAD = (1 << 16), 98 IRQ_NOTHREAD = (1 << 16),
96 IRQ_PER_CPU_DEVID = (1 << 17), 99 IRQ_PER_CPU_DEVID = (1 << 17),
100 IRQ_IS_POLLED = (1 << 18),
97}; 101};
98 102
99#define IRQF_MODIFY_MASK \ 103#define IRQF_MODIFY_MASK \
100 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 104 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
101 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ 105 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
102 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) 106 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
107 IRQ_IS_POLLED)
103 108
104#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) 109#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
105 110
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 0e5d9ecdb2b6..cac496b1e279 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -31,6 +31,8 @@
31#define GIC_DIST_TARGET 0x800 31#define GIC_DIST_TARGET 0x800
32#define GIC_DIST_CONFIG 0xc00 32#define GIC_DIST_CONFIG 0xc00
33#define GIC_DIST_SOFTINT 0xf00 33#define GIC_DIST_SOFTINT 0xf00
34#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
35#define GIC_DIST_SGI_PENDING_SET 0xf20
34 36
35#define GICH_HCR 0x0 37#define GICH_HCR 0x0
36#define GICH_VTR 0x4 38#define GICH_VTR 0x4
@@ -74,6 +76,11 @@ static inline void gic_init(unsigned int nr, int start,
74 gic_init_bases(nr, start, dist, cpu, 0, NULL); 76 gic_init_bases(nr, start, dist, cpu, 0, NULL);
75} 77}
76 78
79void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
80int gic_get_cpu_id(unsigned int cpu);
81void gic_migrate_target(unsigned int new_cpu_id);
82unsigned long gic_get_sgir_physaddr(void);
83
77#endif /* __ASSEMBLY */ 84#endif /* __ASSEMBLY */
78 85
79#endif 86#endif
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 714ba08dc092..e374e369fb2f 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -14,6 +14,6 @@ enum irqreturn {
14}; 14};
15 15
16typedef enum irqreturn irqreturn_t; 16typedef enum irqreturn irqreturn_t;
17#define IRQ_RETVAL(x) ((x) != IRQ_NONE) 17#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
18 18
19#endif 19#endif
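The one-line irqreturn.h change makes IRQ_RETVAL() evaluate to the irqreturn_t constants themselves (IRQ_HANDLED or IRQ_NONE) instead of the bare 0/1 produced by the old comparison. Numerically the two forms agree today because IRQ_HANDLED is 1, so the gain is readability and type intent rather than behaviour. A small standalone check of both formulations; the enum values mirror the kernel's, everything else is scaffolding for the demo.

#include <stdio.h>

enum irqreturn {
	IRQ_NONE	= 0,
	IRQ_HANDLED	= 1,
	IRQ_WAKE_THREAD	= 2,
};

#define IRQ_RETVAL_OLD(x) ((x) != IRQ_NONE)		/* yields bare 0 or 1 */
#define IRQ_RETVAL_NEW(x) ((x) ? IRQ_HANDLED : IRQ_NONE) /* yields enum values */

int main(void)
{
	int handled = 5;	/* typical driver pattern: count of serviced events */

	/* Both map "nonzero" to a handled return; only the new form is
	 * written in terms of the irqreturn_t constants. */
	printf("old: %d, new: %d\n", IRQ_RETVAL_OLD(handled), IRQ_RETVAL_NEW(handled));
	printf("old: %d, new: %d\n", IRQ_RETVAL_OLD(0), IRQ_RETVAL_NEW(0));
	return 0;
}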
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a5079072da66..39999775b922 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -48,6 +48,13 @@
48 48
49#include <linux/types.h> 49#include <linux/types.h>
50#include <linux/compiler.h> 50#include <linux/compiler.h>
51#include <linux/bug.h>
52
53extern bool static_key_initialized;
54
55#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
56 "%s used before call to jump_label_init", \
57 __func__)
51 58
52#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) 59#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
53 60
@@ -128,29 +135,32 @@ struct static_key {
128 135
129static __always_inline void jump_label_init(void) 136static __always_inline void jump_label_init(void)
130{ 137{
138 static_key_initialized = true;
131} 139}
132 140
133static __always_inline bool static_key_false(struct static_key *key) 141static __always_inline bool static_key_false(struct static_key *key)
134{ 142{
135 if (unlikely(atomic_read(&key->enabled)) > 0) 143 if (unlikely(atomic_read(&key->enabled) > 0))
136 return true; 144 return true;
137 return false; 145 return false;
138} 146}
139 147
140static __always_inline bool static_key_true(struct static_key *key) 148static __always_inline bool static_key_true(struct static_key *key)
141{ 149{
142 if (likely(atomic_read(&key->enabled)) > 0) 150 if (likely(atomic_read(&key->enabled) > 0))
143 return true; 151 return true;
144 return false; 152 return false;
145} 153}
146 154
147static inline void static_key_slow_inc(struct static_key *key) 155static inline void static_key_slow_inc(struct static_key *key)
148{ 156{
157 STATIC_KEY_CHECK_USE();
149 atomic_inc(&key->enabled); 158 atomic_inc(&key->enabled);
150} 159}
151 160
152static inline void static_key_slow_dec(struct static_key *key) 161static inline void static_key_slow_dec(struct static_key *key)
153{ 162{
163 STATIC_KEY_CHECK_USE();
154 atomic_dec(&key->enabled); 164 atomic_dec(&key->enabled);
155} 165}
156 166
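Besides the new STATIC_KEY_CHECK_USE() guards, the jump_label.h hunk moves a misplaced closing parenthesis: "unlikely(atomic_read(&key->enabled)) > 0" applied the branch hint to the raw counter and then compared the hint's return value, whereas "unlikely(atomic_read(&key->enabled) > 0)" hints the comparison itself. A userspace sketch of why the placement matters when unlikely() is the usual __builtin_expect wrapper; the macro definitions mirror the common kernel ones, the rest is illustrative only.

#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int enabled_count = 2;

static int key_false_old(void)
{
	/* Hint applies to the counter value; the comparison sits outside it. */
	if (unlikely(enabled_count) > 0)
		return 1;
	return 0;
}

static int key_false_new(void)
{
	/* Hint applies to the whole predicate, which is what was meant. */
	if (unlikely(enabled_count > 0))
		return 1;
	return 0;
}

int main(void)
{
	/* Both return the same value; only the branch-prediction hint differs. */
	printf("old=%d new=%d\n", key_false_old(), key_false_new());
	return 0;
}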
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 113788389b3d..089f70f83e97 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -23,12 +23,14 @@ struct static_key_deferred {
23}; 23};
24static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) 24static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
25{ 25{
26 STATIC_KEY_CHECK_USE();
26 static_key_slow_dec(&key->key); 27 static_key_slow_dec(&key->key);
27} 28}
28static inline void 29static inline void
29jump_label_rate_limit(struct static_key_deferred *key, 30jump_label_rate_limit(struct static_key_deferred *key,
30 unsigned long rl) 31 unsigned long rl)
31{ 32{
33 STATIC_KEY_CHECK_USE();
32} 34}
33#endif /* HAVE_JUMP_LABEL */ 35#endif /* HAVE_JUMP_LABEL */
34#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ 36#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 7f6fe6e015bc..290db1269c4c 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -109,6 +109,7 @@ typedef enum {
109 KDB_REASON_RECURSE, /* Recursive entry to kdb; 109 KDB_REASON_RECURSE, /* Recursive entry to kdb;
110 * regs probably valid */ 110 * regs probably valid */
111 KDB_REASON_SSTEP, /* Single Step trap. - regs valid */ 111 KDB_REASON_SSTEP, /* Single Step trap. - regs valid */
112 KDB_REASON_SYSTEM_NMI, /* In NMI due to SYSTEM cmd; regs valid */
112} kdb_reason_t; 113} kdb_reason_t;
113 114
114extern int kdb_trap_printk; 115extern int kdb_trap_printk;
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 546eb6a76934..f65ce09784f1 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -15,5 +15,6 @@
15#define KPF_OWNER_PRIVATE 37 15#define KPF_OWNER_PRIVATE 37
16#define KPF_ARCH 38 16#define KPF_ARCH 38
17#define KPF_UNCACHED 39 17#define KPF_UNCACHED 39
18#define KPF_SOFTDIRTY 40
18 19
19#endif /* LINUX_KERNEL_PAGE_FLAGS_H */ 20#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 672ddc4de4af..ecb87544cc5d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,7 +193,8 @@ extern int _cond_resched(void);
193 (__x < 0) ? -__x : __x; \ 193 (__x < 0) ? -__x : __x; \
194 }) 194 })
195 195
196#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) 196#if defined(CONFIG_MMU) && \
197 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
197void might_fault(void); 198void might_fault(void);
198#else 199#else
199static inline void might_fault(void) { } 200static inline void might_fault(void) { }
@@ -501,7 +502,6 @@ void tracing_snapshot_alloc(void);
501 502
502extern void tracing_start(void); 503extern void tracing_start(void);
503extern void tracing_stop(void); 504extern void tracing_stop(void);
504extern void ftrace_off_permanent(void);
505 505
506static inline __printf(1, 2) 506static inline __printf(1, 2)
507void ____trace_printk_check_format(const char *fmt, ...) 507void ____trace_printk_check_format(const char *fmt, ...)
@@ -639,7 +639,6 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
639#else 639#else
640static inline void tracing_start(void) { } 640static inline void tracing_start(void) { }
641static inline void tracing_stop(void) { } 641static inline void tracing_stop(void) { }
642static inline void ftrace_off_permanent(void) { }
643static inline void trace_dump_stack(int skip) { } 642static inline void trace_dump_stack(int skip) { }
644 643
645static inline void tracing_on(void) { } 644static inline void tracing_on(void) { }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a733b1..5fd33dc1fe3a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
198extern size_t vmcoreinfo_size; 198extern size_t vmcoreinfo_size;
199extern size_t vmcoreinfo_max_size; 199extern size_t vmcoreinfo_max_size;
200 200
201/* flag to track if kexec reboot is in progress */
202extern bool kexec_in_progress;
203
201int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, 204int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
202 unsigned long long *crash_size, unsigned long long *crash_base); 205 unsigned long long *crash_size, unsigned long long *crash_base);
203int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, 206int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 518a53afb9ea..a74c3a84dfdd 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,6 +45,7 @@ struct key_preparsed_payload {
45 const void *data; /* Raw data */ 45 const void *data; /* Raw data */
46 size_t datalen; /* Raw datalen */ 46 size_t datalen; /* Raw datalen */
47 size_t quotalen; /* Quota length for proposed payload */ 47 size_t quotalen; /* Quota length for proposed payload */
48 bool trusted; /* True if key is trusted */
48}; 49};
49 50
50typedef int (*request_key_actor_t)(struct key_construction *key, 51typedef int (*request_key_actor_t)(struct key_construction *key,
@@ -63,6 +64,11 @@ struct key_type {
63 */ 64 */
64 size_t def_datalen; 65 size_t def_datalen;
65 66
67 /* Default key search algorithm. */
68 unsigned def_lookup_type;
69#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
70#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
71
66 /* vet a description */ 72 /* vet a description */
67 int (*vet_description)(const char *description); 73 int (*vet_description)(const char *description);
68 74
diff --git a/include/linux/key.h b/include/linux/key.h
index 4dfde1161c5e..80d677483e31 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -22,6 +22,7 @@
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/rwsem.h> 23#include <linux/rwsem.h>
24#include <linux/atomic.h> 24#include <linux/atomic.h>
25#include <linux/assoc_array.h>
25 26
26#ifdef __KERNEL__ 27#ifdef __KERNEL__
27#include <linux/uidgid.h> 28#include <linux/uidgid.h>
@@ -82,6 +83,12 @@ struct key_owner;
82struct keyring_list; 83struct keyring_list;
83struct keyring_name; 84struct keyring_name;
84 85
86struct keyring_index_key {
87 struct key_type *type;
88 const char *description;
89 size_t desc_len;
90};
91
85/*****************************************************************************/ 92/*****************************************************************************/
86/* 93/*
87 * key reference with possession attribute handling 94 * key reference with possession attribute handling
@@ -99,7 +106,7 @@ struct keyring_name;
99typedef struct __key_reference_with_attributes *key_ref_t; 106typedef struct __key_reference_with_attributes *key_ref_t;
100 107
101static inline key_ref_t make_key_ref(const struct key *key, 108static inline key_ref_t make_key_ref(const struct key *key,
102 unsigned long possession) 109 bool possession)
103{ 110{
104 return (key_ref_t) ((unsigned long) key | possession); 111 return (key_ref_t) ((unsigned long) key | possession);
105} 112}
@@ -109,7 +116,7 @@ static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
109 return (struct key *) ((unsigned long) key_ref & ~1UL); 116 return (struct key *) ((unsigned long) key_ref & ~1UL);
110} 117}
111 118
112static inline unsigned long is_key_possessed(const key_ref_t key_ref) 119static inline bool is_key_possessed(const key_ref_t key_ref)
113{ 120{
114 return (unsigned long) key_ref & 1UL; 121 return (unsigned long) key_ref & 1UL;
115} 122}
@@ -129,7 +136,6 @@ struct key {
129 struct list_head graveyard_link; 136 struct list_head graveyard_link;
130 struct rb_node serial_node; 137 struct rb_node serial_node;
131 }; 138 };
132 struct key_type *type; /* type of key */
133 struct rw_semaphore sem; /* change vs change sem */ 139 struct rw_semaphore sem; /* change vs change sem */
134 struct key_user *user; /* owner of this key */ 140 struct key_user *user; /* owner of this key */
135 void *security; /* security data for this key */ 141 void *security; /* security data for this key */
@@ -162,13 +168,21 @@ struct key {
162#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ 168#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
163#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ 169#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
164#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ 170#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
171#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
172#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
165 173
166 /* the description string 174 /* the key type and key description string
167 * - this is used to match a key against search criteria 175 * - the desc is used to match a key against search criteria
168 * - this should be a printable string 176 * - it should be a printable string
169 * - eg: for krb5 AFS, this might be "afs@REDHAT.COM" 177 * - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
170 */ 178 */
171 char *description; 179 union {
180 struct keyring_index_key index_key;
181 struct {
182 struct key_type *type; /* type of key */
183 char *description;
184 };
185 };
172 186
173 /* type specific data 187 /* type specific data
174 * - this is used by the keyring type to index the name 188 * - this is used by the keyring type to index the name
@@ -185,11 +199,14 @@ struct key {
185 * whatever 199 * whatever
186 */ 200 */
187 union { 201 union {
188 unsigned long value; 202 union {
189 void __rcu *rcudata; 203 unsigned long value;
190 void *data; 204 void __rcu *rcudata;
191 struct keyring_list __rcu *subscriptions; 205 void *data;
192 } payload; 206 void *data2[2];
207 } payload;
208 struct assoc_array keys;
209 };
193}; 210};
194 211
195extern struct key *key_alloc(struct key_type *type, 212extern struct key *key_alloc(struct key_type *type,
@@ -203,18 +220,23 @@ extern struct key *key_alloc(struct key_type *type,
203#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ 220#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
204#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ 221#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
205#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ 222#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
223#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
206 224
207extern void key_revoke(struct key *key); 225extern void key_revoke(struct key *key);
208extern void key_invalidate(struct key *key); 226extern void key_invalidate(struct key *key);
209extern void key_put(struct key *key); 227extern void key_put(struct key *key);
210 228
211static inline struct key *key_get(struct key *key) 229static inline struct key *__key_get(struct key *key)
212{ 230{
213 if (key) 231 atomic_inc(&key->usage);
214 atomic_inc(&key->usage);
215 return key; 232 return key;
216} 233}
217 234
235static inline struct key *key_get(struct key *key)
236{
237 return key ? __key_get(key) : key;
238}
239
218static inline void key_ref_put(key_ref_t key_ref) 240static inline void key_ref_put(key_ref_t key_ref)
219{ 241{
220 key_put(key_ref_to_ptr(key_ref)); 242 key_put(key_ref_to_ptr(key_ref));
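The key.h hunk overlays a keyring_index_key with the old type/description pair via an anonymous union, so existing key->type and key->description accesses keep compiling while newer search code can hand the pair around as a single index key. A standalone C11 sketch of that aliasing trick; demo_index_key and demo_key are invented names for the example, not the kernel structures.

#include <stdio.h>
#include <stddef.h>

struct demo_index_key {
	const char *type;
	const char *description;
	size_t desc_len;
};

struct demo_key {
	union {
		struct demo_index_key index_key;
		struct {			/* anonymous: members stay directly addressable */
			const char *type;
			const char *description;
		};
	};
};

int main(void)
{
	struct demo_key k = {
		.index_key = { .type = "user", .description = "example", .desc_len = 7 },
	};

	/* Old-style field access and new-style aggregate access see the same data,
	 * because the anonymous struct shares its leading layout with index_key. */
	printf("type=%s desc=%s len=%zu\n",
	       k.type, k.description, k.index_key.desc_len);
	return 0;
}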
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 10308c6a3d1c..552d51efb429 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * A generic kernel FIFO implementation 2 * A generic kernel FIFO implementation
3 * 3 *
4 * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> 4 * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -67,9 +67,10 @@ struct __kfifo {
67 union { \ 67 union { \
68 struct __kfifo kfifo; \ 68 struct __kfifo kfifo; \
69 datatype *type; \ 69 datatype *type; \
70 const datatype *const_type; \
70 char (*rectype)[recsize]; \ 71 char (*rectype)[recsize]; \
71 ptrtype *ptr; \ 72 ptrtype *ptr; \
72 const ptrtype *ptr_const; \ 73 ptrtype const *ptr_const; \
73 } 74 }
74 75
75#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \ 76#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \
@@ -386,16 +387,12 @@ __kfifo_int_must_check_helper( \
386#define kfifo_put(fifo, val) \ 387#define kfifo_put(fifo, val) \
387({ \ 388({ \
388 typeof((fifo) + 1) __tmp = (fifo); \ 389 typeof((fifo) + 1) __tmp = (fifo); \
389 typeof((val) + 1) __val = (val); \ 390 typeof(*__tmp->const_type) __val = (val); \
390 unsigned int __ret; \ 391 unsigned int __ret; \
391 const size_t __recsize = sizeof(*__tmp->rectype); \ 392 size_t __recsize = sizeof(*__tmp->rectype); \
392 struct __kfifo *__kfifo = &__tmp->kfifo; \ 393 struct __kfifo *__kfifo = &__tmp->kfifo; \
393 if (0) { \
394 typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
395 __dummy = (typeof(__val))NULL; \
396 } \
397 if (__recsize) \ 394 if (__recsize) \
398 __ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \ 395 __ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \
399 __recsize); \ 396 __recsize); \
400 else { \ 397 else { \
401 __ret = !kfifo_is_full(__tmp); \ 398 __ret = !kfifo_is_full(__tmp); \
@@ -404,7 +401,7 @@ __kfifo_int_must_check_helper( \
404 ((typeof(__tmp->type))__kfifo->data) : \ 401 ((typeof(__tmp->type))__kfifo->data) : \
405 (__tmp->buf) \ 402 (__tmp->buf) \
406 )[__kfifo->in & __tmp->kfifo.mask] = \ 403 )[__kfifo->in & __tmp->kfifo.mask] = \
407 *(typeof(__tmp->type))__val; \ 404 (typeof(*__tmp->type))__val; \
408 smp_wmb(); \ 405 smp_wmb(); \
409 __kfifo->in++; \ 406 __kfifo->in++; \
410 } \ 407 } \
@@ -415,7 +412,7 @@ __kfifo_int_must_check_helper( \
415/** 412/**
416 * kfifo_get - get data from the fifo 413 * kfifo_get - get data from the fifo
417 * @fifo: address of the fifo to be used 414 * @fifo: address of the fifo to be used
418 * @val: the var where to store the data to be added 415 * @val: address where to store the data
419 * 416 *
420 * This macro reads the data from the fifo. 417 * This macro reads the data from the fifo.
421 * It returns 0 if the fifo was empty. Otherwise it returns the number 418 * It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -428,12 +425,10 @@ __kfifo_int_must_check_helper( \
428__kfifo_uint_must_check_helper( \ 425__kfifo_uint_must_check_helper( \
429({ \ 426({ \
430 typeof((fifo) + 1) __tmp = (fifo); \ 427 typeof((fifo) + 1) __tmp = (fifo); \
431 typeof((val) + 1) __val = (val); \ 428 typeof(__tmp->ptr) __val = (val); \
432 unsigned int __ret; \ 429 unsigned int __ret; \
433 const size_t __recsize = sizeof(*__tmp->rectype); \ 430 const size_t __recsize = sizeof(*__tmp->rectype); \
434 struct __kfifo *__kfifo = &__tmp->kfifo; \ 431 struct __kfifo *__kfifo = &__tmp->kfifo; \
435 if (0) \
436 __val = (typeof(__tmp->ptr))0; \
437 if (__recsize) \ 432 if (__recsize) \
438 __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \ 433 __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \
439 __recsize); \ 434 __recsize); \
@@ -456,7 +451,7 @@ __kfifo_uint_must_check_helper( \
456/** 451/**
457 * kfifo_peek - get data from the fifo without removing 452 * kfifo_peek - get data from the fifo without removing
458 * @fifo: address of the fifo to be used 453 * @fifo: address of the fifo to be used
459 * @val: the var where to store the data to be added 454 * @val: address where to store the data
460 * 455 *
461 * This reads the data from the fifo without removing it from the fifo. 456 * This reads the data from the fifo without removing it from the fifo.
462 * It returns 0 if the fifo was empty. Otherwise it returns the number 457 * It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -469,12 +464,10 @@ __kfifo_uint_must_check_helper( \
469__kfifo_uint_must_check_helper( \ 464__kfifo_uint_must_check_helper( \
470({ \ 465({ \
471 typeof((fifo) + 1) __tmp = (fifo); \ 466 typeof((fifo) + 1) __tmp = (fifo); \
472 typeof((val) + 1) __val = (val); \ 467 typeof(__tmp->ptr) __val = (val); \
473 unsigned int __ret; \ 468 unsigned int __ret; \
474 const size_t __recsize = sizeof(*__tmp->rectype); \ 469 const size_t __recsize = sizeof(*__tmp->rectype); \
475 struct __kfifo *__kfifo = &__tmp->kfifo; \ 470 struct __kfifo *__kfifo = &__tmp->kfifo; \
476 if (0) \
477 __val = (typeof(__tmp->ptr))NULL; \
478 if (__recsize) \ 471 if (__recsize) \
479 __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \ 472 __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \
480 __recsize); \ 473 __recsize); \
@@ -508,14 +501,10 @@ __kfifo_uint_must_check_helper( \
508#define kfifo_in(fifo, buf, n) \ 501#define kfifo_in(fifo, buf, n) \
509({ \ 502({ \
510 typeof((fifo) + 1) __tmp = (fifo); \ 503 typeof((fifo) + 1) __tmp = (fifo); \
511 typeof((buf) + 1) __buf = (buf); \ 504 typeof(__tmp->ptr_const) __buf = (buf); \
512 unsigned long __n = (n); \ 505 unsigned long __n = (n); \
513 const size_t __recsize = sizeof(*__tmp->rectype); \ 506 const size_t __recsize = sizeof(*__tmp->rectype); \
514 struct __kfifo *__kfifo = &__tmp->kfifo; \ 507 struct __kfifo *__kfifo = &__tmp->kfifo; \
515 if (0) { \
516 typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
517 __dummy = (typeof(__buf))NULL; \
518 } \
519 (__recsize) ?\ 508 (__recsize) ?\
520 __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \ 509 __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \
521 __kfifo_in(__kfifo, __buf, __n); \ 510 __kfifo_in(__kfifo, __buf, __n); \
@@ -561,14 +550,10 @@ __kfifo_uint_must_check_helper( \
561__kfifo_uint_must_check_helper( \ 550__kfifo_uint_must_check_helper( \
562({ \ 551({ \
563 typeof((fifo) + 1) __tmp = (fifo); \ 552 typeof((fifo) + 1) __tmp = (fifo); \
564 typeof((buf) + 1) __buf = (buf); \ 553 typeof(__tmp->ptr) __buf = (buf); \
565 unsigned long __n = (n); \ 554 unsigned long __n = (n); \
566 const size_t __recsize = sizeof(*__tmp->rectype); \ 555 const size_t __recsize = sizeof(*__tmp->rectype); \
567 struct __kfifo *__kfifo = &__tmp->kfifo; \ 556 struct __kfifo *__kfifo = &__tmp->kfifo; \
568 if (0) { \
569 typeof(__tmp->ptr) __dummy = NULL; \
570 __buf = __dummy; \
571 } \
572 (__recsize) ?\ 557 (__recsize) ?\
573 __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \ 558 __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \
574 __kfifo_out(__kfifo, __buf, __n); \ 559 __kfifo_out(__kfifo, __buf, __n); \
@@ -773,14 +758,10 @@ __kfifo_uint_must_check_helper( \
773__kfifo_uint_must_check_helper( \ 758__kfifo_uint_must_check_helper( \
774({ \ 759({ \
775 typeof((fifo) + 1) __tmp = (fifo); \ 760 typeof((fifo) + 1) __tmp = (fifo); \
776 typeof((buf) + 1) __buf = (buf); \ 761 typeof(__tmp->ptr) __buf = (buf); \
777 unsigned long __n = (n); \ 762 unsigned long __n = (n); \
778 const size_t __recsize = sizeof(*__tmp->rectype); \ 763 const size_t __recsize = sizeof(*__tmp->rectype); \
779 struct __kfifo *__kfifo = &__tmp->kfifo; \ 764 struct __kfifo *__kfifo = &__tmp->kfifo; \
780 if (0) { \
781 typeof(__tmp->ptr) __dummy __attribute__ ((unused)) = NULL; \
782 __buf = __dummy; \
783 } \
784 (__recsize) ? \ 765 (__recsize) ? \
785 __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \ 766 __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \
786 __kfifo_out_peek(__kfifo, __buf, __n); \ 767 __kfifo_out_peek(__kfifo, __buf, __n); \
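The kfifo.h changes drop the old "if (0)" dummy-assignment tricks in favour of declaring the temporaries directly as typeof(__tmp->const_type) or typeof(__tmp->ptr), so a wrongly typed element or buffer still trips a compile-time diagnostic with far less macro noise, and kfifo_put() now takes its element by value. A toy illustration of the typeof-based check outside the kernel; DECLARE_TOY_FIFO, toy_put and the struct layout are invented for the sketch, and the gcc/clang typeof extension is assumed.

#include <stdio.h>

/* A toy "typed fifo": the element type is recorded only through a dummy
 * pointer member, the storage itself is a plain array. */
#define DECLARE_TOY_FIFO(name, type, size)	\
	struct {				\
		type *element_type;	/* never dereferenced, carries the type */ \
		type data[size];	\
		unsigned int in;	\
	} name = { .in = 0 }

/* Assigning val to a typeof(*fifo->element_type) temporary makes the
 * compiler check (and convert) the argument's type, like kfifo_put(). */
#define toy_put(fifo, val)					\
({								\
	typeof(*(fifo)->element_type) __val = (val);		\
	(fifo)->data[(fifo)->in++] = __val;			\
})

int main(void)
{
	DECLARE_TOY_FIFO(fifo, int, 8);

	toy_put(&fifo, 42);
	/* toy_put(&fifo, "oops");  -- the compiler flags the pointer/int mismatch */
	printf("stored %d\n", fifo.data[0]);
	return 0;
}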
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index c6e091bf39a5..dfb4f2ffdaa2 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -310,6 +310,7 @@ extern int
310kgdb_handle_exception(int ex_vector, int signo, int err_code, 310kgdb_handle_exception(int ex_vector, int signo, int err_code,
311 struct pt_regs *regs); 311 struct pt_regs *regs);
312extern int kgdb_nmicallback(int cpu, void *regs); 312extern int kgdb_nmicallback(int cpu, void *regs);
313extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *snd_rdy);
313extern void gdbstub_exit(int status); 314extern void gdbstub_exit(int status);
314 315
315extern int kgdb_single_step; 316extern int kgdb_single_step;
diff --git a/include/linux/kobj_completion.h b/include/linux/kobj_completion.h
new file mode 100644
index 000000000000..a428f6436063
--- /dev/null
+++ b/include/linux/kobj_completion.h
@@ -0,0 +1,18 @@
1#ifndef _KOBJ_COMPLETION_H_
2#define _KOBJ_COMPLETION_H_
3
4#include <linux/kobject.h>
5#include <linux/completion.h>
6
7struct kobj_completion {
8 struct kobject kc_kobj;
9 struct completion kc_unregister;
10};
11
12#define kobj_to_kobj_completion(kobj) \
13 container_of(kobj, struct kobj_completion, kc_kobj)
14
15void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype);
16void kobj_completion_release(struct kobject *kobj);
17void kobj_completion_del_and_wait(struct kobj_completion *kc);
18#endif /* _KOBJ_COMPLETION_H_ */
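The new kobj_completion helper pairs a kobject with a completion so the owner can wait until the final reference is dropped before freeing surrounding state. The kernel-side usage cannot be shown runnably here, so below is a hedged userspace analogue of the same teardown idea, built on pthreads; the names (obj_completion, oc_del_and_wait and friends) are invented for the demo and only mimic the declarations above, they are not the kernel implementation.

#include <pthread.h>
#include <stdio.h>

struct obj_completion {
	int refcount;
	int released;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static void oc_init(struct obj_completion *oc)
{
	oc->refcount = 1;
	oc->released = 0;
	pthread_mutex_init(&oc->lock, NULL);
	pthread_cond_init(&oc->cond, NULL);
}

static void oc_get(struct obj_completion *oc)
{
	pthread_mutex_lock(&oc->lock);
	oc->refcount++;
	pthread_mutex_unlock(&oc->lock);
}

static void oc_put(struct obj_completion *oc)
{
	pthread_mutex_lock(&oc->lock);
	if (--oc->refcount == 0) {	/* the "release" fires on the last put */
		oc->released = 1;
		pthread_cond_broadcast(&oc->cond);
	}
	pthread_mutex_unlock(&oc->lock);
}

/* Counterpart of kobj_completion_del_and_wait(): drop our own reference,
 * then block until every other holder has dropped theirs. */
static void oc_del_and_wait(struct obj_completion *oc)
{
	oc_put(oc);
	pthread_mutex_lock(&oc->lock);
	while (!oc->released)
		pthread_cond_wait(&oc->cond, &oc->lock);
	pthread_mutex_unlock(&oc->lock);
}

static void *user(void *arg)
{
	oc_put(arg);			/* simulated other holder going away */
	return NULL;
}

int main(void)
{
	struct obj_completion oc;
	pthread_t t;

	oc_init(&oc);
	oc_get(&oc);			/* a second user takes a reference */
	pthread_create(&t, NULL, user, &oc);
	oc_del_and_wait(&oc);		/* safe to tear down after this returns */
	pthread_join(&t, NULL);
	puts("all references gone, object can be freed");
	return 0;
}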
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index de6dcbcc6ef7..e7ba650086ce 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -107,6 +107,7 @@ extern int __must_check kobject_move(struct kobject *, struct kobject *);
107extern struct kobject *kobject_get(struct kobject *kobj); 107extern struct kobject *kobject_get(struct kobject *kobj);
108extern void kobject_put(struct kobject *kobj); 108extern void kobject_put(struct kobject *kobj);
109 109
110extern const void *kobject_namespace(struct kobject *kobj);
110extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); 111extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
111 112
112struct kobj_type { 113struct kobj_type {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0fbbc7aa02cb..9523d2ad7535 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -142,7 +142,7 @@ struct kvm;
142struct kvm_vcpu; 142struct kvm_vcpu;
143extern struct kmem_cache *kvm_vcpu_cache; 143extern struct kmem_cache *kvm_vcpu_cache;
144 144
145extern raw_spinlock_t kvm_lock; 145extern spinlock_t kvm_lock;
146extern struct list_head vm_list; 146extern struct list_head vm_list;
147 147
148struct kvm_io_range { 148struct kvm_io_range {
@@ -189,8 +189,7 @@ struct kvm_async_pf {
189 gva_t gva; 189 gva_t gva;
190 unsigned long addr; 190 unsigned long addr;
191 struct kvm_arch_async_pf arch; 191 struct kvm_arch_async_pf arch;
192 struct page *page; 192 bool wakeup_all;
193 bool done;
194}; 193};
195 194
196void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); 195void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
@@ -508,9 +507,10 @@ int kvm_set_memory_region(struct kvm *kvm,
508 struct kvm_userspace_memory_region *mem); 507 struct kvm_userspace_memory_region *mem);
509int __kvm_set_memory_region(struct kvm *kvm, 508int __kvm_set_memory_region(struct kvm *kvm,
510 struct kvm_userspace_memory_region *mem); 509 struct kvm_userspace_memory_region *mem);
511void kvm_arch_free_memslot(struct kvm_memory_slot *free, 510void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
512 struct kvm_memory_slot *dont); 511 struct kvm_memory_slot *dont);
513int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); 512int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
513 unsigned long npages);
514void kvm_arch_memslots_updated(struct kvm *kvm); 514void kvm_arch_memslots_updated(struct kvm *kvm);
515int kvm_arch_prepare_memory_region(struct kvm *kvm, 515int kvm_arch_prepare_memory_region(struct kvm *kvm,
516 struct kvm_memory_slot *memslot, 516 struct kvm_memory_slot *memslot,
@@ -671,6 +671,25 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
671} 671}
672#endif 672#endif
673 673
674#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
675void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
676void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
677bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
678#else
679static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
680{
681}
682
683static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
684{
685}
686
687static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
688{
689 return false;
690}
691#endif
692
674static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) 693static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
675{ 694{
676#ifdef __KVM_HAVE_ARCH_WQP 695#ifdef __KVM_HAVE_ARCH_WQP
@@ -747,9 +766,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
747int kvm_request_irq_source_id(struct kvm *kvm); 766int kvm_request_irq_source_id(struct kvm *kvm);
748void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); 767void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
749 768
750/* For vcpu->arch.iommu_flags */
751#define KVM_IOMMU_CACHE_COHERENCY 0x1
752
753#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 769#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
754int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); 770int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
755void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); 771void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
@@ -789,7 +805,7 @@ static inline void kvm_guest_enter(void)
789 805
790 /* KVM does not hold any references to rcu protected data when it 806 /* KVM does not hold any references to rcu protected data when it
791 * switches CPU into a guest mode. In fact switching to a guest mode 807 * switches CPU into a guest mode. In fact switching to a guest mode
792 * is very similar to exiting to userspase from rcu point of view. In 808 * is very similar to exiting to userspace from rcu point of view. In
793 * addition CPU may stay in a guest mode for quite a long time (up to 809 * addition CPU may stay in a guest mode for quite a long time (up to
794 * one time slice). Lets treat guest mode as quiescent state, just like 810 * one time slice). Lets treat guest mode as quiescent state, just like
795 * we do with user-mode execution. 811 * we do with user-mode execution.
@@ -842,13 +858,6 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
842 return gfn_to_memslot(kvm, gfn)->id; 858 return gfn_to_memslot(kvm, gfn)->id;
843} 859}
844 860
845static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
846{
847 /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
848 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
849 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
850}
851
852static inline gfn_t 861static inline gfn_t
853hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) 862hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
854{ 863{
@@ -1066,6 +1075,7 @@ struct kvm_device *kvm_device_from_filp(struct file *filp);
1066 1075
1067extern struct kvm_device_ops kvm_mpic_ops; 1076extern struct kvm_device_ops kvm_mpic_ops;
1068extern struct kvm_device_ops kvm_xics_ops; 1077extern struct kvm_device_ops kvm_xics_ops;
1078extern struct kvm_device_ops kvm_vfio_ops;
1069 1079
1070#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 1080#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
1071 1081
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0d24e932db0b..96549abe8842 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -25,16 +25,6 @@
25#include <linux/cpu.h> 25#include <linux/cpu.h>
26#include <linux/notifier.h> 26#include <linux/notifier.h>
27 27
28/* can make br locks by using local lock for read side, global lock for write */
29#define br_lock_init(name) lg_lock_init(name, #name)
30#define br_read_lock(name) lg_local_lock(name)
31#define br_read_unlock(name) lg_local_unlock(name)
32#define br_write_lock(name) lg_global_lock(name)
33#define br_write_unlock(name) lg_global_unlock(name)
34
35#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
36#define DEFINE_STATIC_BRLOCK(name) DEFINE_STATIC_LGLOCK(name)
37
38#ifdef CONFIG_DEBUG_LOCK_ALLOC 28#ifdef CONFIG_DEBUG_LOCK_ALLOC
39#define LOCKDEP_INIT_MAP lockdep_init_map 29#define LOCKDEP_INIT_MAP lockdep_init_map
40#else 30#else
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26485f4..9b503376738f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
418 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ 418 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
419 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ 419 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
420 ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ 420 ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
421 ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
421 422
422 /* DMA mask for user DMA control: User visible values; DO NOT 423 /* DMA mask for user DMA control: User visible values; DO NOT
423 renumber */ 424 renumber */
diff --git a/include/linux/list.h b/include/linux/list.h
index f4d8a2f12a33..ef9594171062 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -362,6 +362,17 @@ static inline void list_splice_tail_init(struct list_head *list,
362 list_entry((ptr)->next, type, member) 362 list_entry((ptr)->next, type, member)
363 363
364/** 364/**
365 * list_last_entry - get the last element from a list
366 * @ptr: the list head to take the element from.
367 * @type: the type of the struct this is embedded in.
368 * @member: the name of the list_struct within the struct.
369 *
370 * Note, that list is expected to be not empty.
371 */
372#define list_last_entry(ptr, type, member) \
373 list_entry((ptr)->prev, type, member)
374
375/**
365 * list_first_entry_or_null - get the first element from a list 376 * list_first_entry_or_null - get the first element from a list
366 * @ptr: the list head to take the element from. 377 * @ptr: the list head to take the element from.
367 * @type: the type of the struct this is embedded in. 378 * @type: the type of the struct this is embedded in.
@@ -373,6 +384,22 @@ static inline void list_splice_tail_init(struct list_head *list,
373 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) 384 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
374 385
375/** 386/**
387 * list_next_entry - get the next element in list
388 * @pos: the type * to cursor
389 * @member: the name of the list_struct within the struct.
390 */
391#define list_next_entry(pos, member) \
392 list_entry((pos)->member.next, typeof(*(pos)), member)
393
394/**
395 * list_prev_entry - get the prev element in list
396 * @pos: the type * to cursor
397 * @member: the name of the list_struct within the struct.
398 */
399#define list_prev_entry(pos, member) \
400 list_entry((pos)->member.prev, typeof(*(pos)), member)
401
402/**
376 * list_for_each - iterate over a list 403 * list_for_each - iterate over a list
377 * @pos: the &struct list_head to use as a loop cursor. 404 * @pos: the &struct list_head to use as a loop cursor.
378 * @head: the head for your list. 405 * @head: the head for your list.
@@ -416,9 +443,9 @@ static inline void list_splice_tail_init(struct list_head *list,
416 * @member: the name of the list_struct within the struct. 443 * @member: the name of the list_struct within the struct.
417 */ 444 */
418#define list_for_each_entry(pos, head, member) \ 445#define list_for_each_entry(pos, head, member) \
419 for (pos = list_entry((head)->next, typeof(*pos), member); \ 446 for (pos = list_first_entry(head, typeof(*pos), member); \
420 &pos->member != (head); \ 447 &pos->member != (head); \
421 pos = list_entry(pos->member.next, typeof(*pos), member)) 448 pos = list_next_entry(pos, member))
422 449
423/** 450/**
424 * list_for_each_entry_reverse - iterate backwards over list of given type. 451 * list_for_each_entry_reverse - iterate backwards over list of given type.
@@ -427,9 +454,9 @@ static inline void list_splice_tail_init(struct list_head *list,
427 * @member: the name of the list_struct within the struct. 454 * @member: the name of the list_struct within the struct.
428 */ 455 */
429#define list_for_each_entry_reverse(pos, head, member) \ 456#define list_for_each_entry_reverse(pos, head, member) \
430 for (pos = list_entry((head)->prev, typeof(*pos), member); \ 457 for (pos = list_last_entry(head, typeof(*pos), member); \
431 &pos->member != (head); \ 458 &pos->member != (head); \
432 pos = list_entry(pos->member.prev, typeof(*pos), member)) 459 pos = list_prev_entry(pos, member))
433 460
434/** 461/**
435 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() 462 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
@@ -452,9 +479,9 @@ static inline void list_splice_tail_init(struct list_head *list,
452 * the current position. 479 * the current position.
453 */ 480 */
454#define list_for_each_entry_continue(pos, head, member) \ 481#define list_for_each_entry_continue(pos, head, member) \
455 for (pos = list_entry(pos->member.next, typeof(*pos), member); \ 482 for (pos = list_next_entry(pos, member); \
456 &pos->member != (head); \ 483 &pos->member != (head); \
457 pos = list_entry(pos->member.next, typeof(*pos), member)) 484 pos = list_next_entry(pos, member))
458 485
459/** 486/**
460 * list_for_each_entry_continue_reverse - iterate backwards from the given point 487 * list_for_each_entry_continue_reverse - iterate backwards from the given point
@@ -466,9 +493,9 @@ static inline void list_splice_tail_init(struct list_head *list,
466 * the current position. 493 * the current position.
467 */ 494 */
468#define list_for_each_entry_continue_reverse(pos, head, member) \ 495#define list_for_each_entry_continue_reverse(pos, head, member) \
469 for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ 496 for (pos = list_prev_entry(pos, member); \
470 &pos->member != (head); \ 497 &pos->member != (head); \
471 pos = list_entry(pos->member.prev, typeof(*pos), member)) 498 pos = list_prev_entry(pos, member))
472 499
473/** 500/**
474 * list_for_each_entry_from - iterate over list of given type from the current point 501 * list_for_each_entry_from - iterate over list of given type from the current point
@@ -479,8 +506,8 @@ static inline void list_splice_tail_init(struct list_head *list,
479 * Iterate over list of given type, continuing from current position. 506 * Iterate over list of given type, continuing from current position.
480 */ 507 */
481#define list_for_each_entry_from(pos, head, member) \ 508#define list_for_each_entry_from(pos, head, member) \
482 for (; &pos->member != (head); \ 509 for (; &pos->member != (head); \
483 pos = list_entry(pos->member.next, typeof(*pos), member)) 510 pos = list_next_entry(pos, member))
484 511
485/** 512/**
486 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 513 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
@@ -490,10 +517,10 @@ static inline void list_splice_tail_init(struct list_head *list,
490 * @member: the name of the list_struct within the struct. 517 * @member: the name of the list_struct within the struct.
491 */ 518 */
492#define list_for_each_entry_safe(pos, n, head, member) \ 519#define list_for_each_entry_safe(pos, n, head, member) \
493 for (pos = list_entry((head)->next, typeof(*pos), member), \ 520 for (pos = list_first_entry(head, typeof(*pos), member), \
494 n = list_entry(pos->member.next, typeof(*pos), member); \ 521 n = list_next_entry(pos, member); \
495 &pos->member != (head); \ 522 &pos->member != (head); \
496 pos = n, n = list_entry(n->member.next, typeof(*n), member)) 523 pos = n, n = list_next_entry(n, member))
497 524
498/** 525/**
499 * list_for_each_entry_safe_continue - continue list iteration safe against removal 526 * list_for_each_entry_safe_continue - continue list iteration safe against removal
@@ -506,10 +533,10 @@ static inline void list_splice_tail_init(struct list_head *list,
506 * safe against removal of list entry. 533 * safe against removal of list entry.
507 */ 534 */
508#define list_for_each_entry_safe_continue(pos, n, head, member) \ 535#define list_for_each_entry_safe_continue(pos, n, head, member) \
509 for (pos = list_entry(pos->member.next, typeof(*pos), member), \ 536 for (pos = list_next_entry(pos, member), \
510 n = list_entry(pos->member.next, typeof(*pos), member); \ 537 n = list_next_entry(pos, member); \
511 &pos->member != (head); \ 538 &pos->member != (head); \
512 pos = n, n = list_entry(n->member.next, typeof(*n), member)) 539 pos = n, n = list_next_entry(n, member))
513 540
514/** 541/**
515 * list_for_each_entry_safe_from - iterate over list from current point safe against removal 542 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
@@ -522,9 +549,9 @@ static inline void list_splice_tail_init(struct list_head *list,
522 * removal of list entry. 549 * removal of list entry.
523 */ 550 */
524#define list_for_each_entry_safe_from(pos, n, head, member) \ 551#define list_for_each_entry_safe_from(pos, n, head, member) \
525 for (n = list_entry(pos->member.next, typeof(*pos), member); \ 552 for (n = list_next_entry(pos, member); \
526 &pos->member != (head); \ 553 &pos->member != (head); \
527 pos = n, n = list_entry(n->member.next, typeof(*n), member)) 554 pos = n, n = list_next_entry(n, member))
528 555
529/** 556/**
530 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal 557 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
@@ -537,10 +564,10 @@ static inline void list_splice_tail_init(struct list_head *list,
537 * of list entry. 564 * of list entry.
538 */ 565 */
539#define list_for_each_entry_safe_reverse(pos, n, head, member) \ 566#define list_for_each_entry_safe_reverse(pos, n, head, member) \
540 for (pos = list_entry((head)->prev, typeof(*pos), member), \ 567 for (pos = list_last_entry(head, typeof(*pos), member), \
541 n = list_entry(pos->member.prev, typeof(*pos), member); \ 568 n = list_prev_entry(pos, member); \
542 &pos->member != (head); \ 569 &pos->member != (head); \
543 pos = n, n = list_entry(n->member.prev, typeof(*n), member)) 570 pos = n, n = list_prev_entry(n, member))
544 571
545/** 572/**
546 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop 573 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
@@ -555,7 +582,7 @@ static inline void list_splice_tail_init(struct list_head *list,
555 * completing the current iteration of the loop body. 582 * completing the current iteration of the loop body.
556 */ 583 */
557#define list_safe_reset_next(pos, n, member) \ 584#define list_safe_reset_next(pos, n, member) \
558 n = list_entry(pos->member.next, typeof(*pos), member) 585 n = list_next_entry(pos, member)
559 586
560/* 587/*
561 * Double linked lists with a single pointer list head. 588 * Double linked lists with a single pointer list head.
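The list.h hunks add list_last_entry(), list_next_entry() and list_prev_entry(), then rewrite the iteration macros in terms of them so the open-coded list_entry(pos->member.next, ...) pattern disappears from the loop bodies. The following self-contained demo re-declares just enough of the intrusive-list plumbing (container_of, list_entry, a minimal list_add_tail) to build outside the kernel and show the new helpers in use; it relies on the gcc/clang typeof extension, as the kernel macros do.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member)		container_of(ptr, type, member)
#define list_first_entry(ptr, type, member)	list_entry((ptr)->next, type, member)
/* The helpers added in this hunk: */
#define list_last_entry(ptr, type, member)	list_entry((ptr)->prev, type, member)
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)
#define list_prev_entry(pos, member) \
	list_entry((pos)->member.prev, typeof(*(pos)), member)

struct item {
	int value;
	struct list_head node;
};

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct item a = { .value = 1 }, b = { .value = 2 }, c = { .value = 3 };
	struct item *pos;

	list_add_tail(&a.node, &head);
	list_add_tail(&b.node, &head);
	list_add_tail(&c.node, &head);

	/* Walking with the new helpers instead of open-coded list_entry(). */
	for (pos = list_first_entry(&head, struct item, node);
	     &pos->node != &head;
	     pos = list_next_entry(pos, node))
		printf("%d ", pos->value);

	printf("\nlast = %d\n", list_last_entry(&head, struct item, node)->value);
	return 0;
}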
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 8828a78dec9a..fbf10a0bc095 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -195,4 +195,6 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
195 195
196extern struct llist_node *llist_del_first(struct llist_head *head); 196extern struct llist_node *llist_del_first(struct llist_head *head);
197 197
198struct llist_node *llist_reverse_order(struct llist_node *head);
199
198#endif /* LLIST_H */ 200#endif /* LLIST_H */
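llist.h only gains the llist_reverse_order() declaration here: entries pulled off a lock-less list with llist_del_all() come out in LIFO order, and reversing the detached chain restores submission order. A minimal sketch of what such a reversal does on a NULL-terminated singly linked chain; this is an illustration, not the kernel's implementation, and the node type is invented for the demo.

#include <stdio.h>

struct node { struct node *next; int value; };

/* Reverse a NULL-terminated singly linked chain and return the new head. */
static struct node *reverse_order(struct node *head)
{
	struct node *new_head = NULL;

	while (head) {
		struct node *next = head->next;

		head->next = new_head;
		new_head = head;
		head = next;
	}
	return new_head;
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *n;

	for (n = reverse_order(&a); n; n = n->next)
		printf("%d ", n->value);	/* prints 3 2 1 */
	putchar('\n');
	return 0;
}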
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cfc2f119779a..92b1bfc5da60 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -497,6 +497,10 @@ static inline void print_irqtrace_events(struct task_struct *curr)
497#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) 497#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
498#define rwlock_release(l, n, i) lock_release(l, n, i) 498#define rwlock_release(l, n, i) lock_release(l, n, i)
499 499
500#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
501#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
502#define seqcount_release(l, n, i) lock_release(l, n, i)
503
500#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) 504#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
501#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) 505#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
502#define mutex_release(l, n, i) lock_release(l, n, i) 506#define mutex_release(l, n, i) lock_release(l, n, i)
@@ -504,11 +508,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
504#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) 508#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
505#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) 509#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
506#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) 510#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
507# define rwsem_release(l, n, i) lock_release(l, n, i) 511#define rwsem_release(l, n, i) lock_release(l, n, i)
508 512
509#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) 513#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
510#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) 514#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
511# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) 515#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
512 516
513#ifdef CONFIG_PROVE_LOCKING 517#ifdef CONFIG_PROVE_LOCKING
514# define might_lock(lock) \ 518# define might_lock(lock) \
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index f279ed9a9163..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -15,10 +15,15 @@
15 */ 15 */
16 16
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <generated/bounds.h>
19
20#define USE_CMPXCHG_LOCKREF \
21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
22 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
18 23
19struct lockref { 24struct lockref {
20 union { 25 union {
21#ifdef CONFIG_CMPXCHG_LOCKREF 26#if USE_CMPXCHG_LOCKREF
22 aligned_u64 lock_count; 27 aligned_u64 lock_count;
23#endif 28#endif
24 struct { 29 struct {
@@ -36,4 +41,10 @@ extern int lockref_put_or_lock(struct lockref *);
36extern void lockref_mark_dead(struct lockref *); 41extern void lockref_mark_dead(struct lockref *);
37extern int lockref_get_not_dead(struct lockref *); 42extern int lockref_get_not_dead(struct lockref *);
38 43
44/* Must be called under spinlock for reliable results */
45static inline int __lockref_is_dead(const struct lockref *l)
46{
47 return ((int)l->count < 0);
48}
49
39#endif /* __LINUX_LOCKREF_H */ 50#endif /* __LINUX_LOCKREF_H */
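Editor's note: __lockref_is_dead() just tests for the negative count that lockref_mark_dead() sets and, as the comment says, is only meaningful while the embedded spinlock is held. A minimal sketch, assuming a hypothetical object type that embeds a lockref:

#include <linux/lockref.h>

struct obj {                            /* hypothetical container */
        struct lockref ref;
};

static bool obj_get_locked(struct obj *o)
{
        bool alive;

        spin_lock(&o->ref.lock);
        alive = !__lockref_is_dead(&o->ref);    /* reliable only under the lock */
        if (alive)
                o->ref.count++;
        spin_unlock(&o->ref.lock);
        return alive;
}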
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
133 return ret; 133 return ret;
134} 134}
135 135
136#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
137
138#ifndef mul_u64_u32_shr
139static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
140{
141 return (u64)(((unsigned __int128)a * mul) >> shift);
142}
143#endif /* mul_u64_u32_shr */
144
145#else
146
147#ifndef mul_u64_u32_shr
148static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
149{
150 u32 ah, al;
151 u64 ret;
152
153 al = a;
154 ah = a >> 32;
155
156 ret = ((u64)al * mul) >> shift;
157 if (ah)
158 ret += ((u64)ah * mul) << (32 - shift);
159
160 return ret;
161}
162#endif /* mul_u64_u32_shr */
163
164#endif
165
136#endif /* _LINUX_MATH64_H */ 166#endif /* _LINUX_MATH64_H */
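Editor's note: mul_u64_u32_shr() computes (a * mul) >> shift without losing the high bits of the up-to-96-bit intermediate product, using __int128 where the architecture provides it and a split 32x32 multiply otherwise (the fallback implicitly assumes shift <= 32). A hedged sketch of the classic mult/shift scaling use; the cycles_to_ns() wrapper is illustrative, not from the patch.

#include <linux/math64.h>

/*
 * ns = (cycles * mult) >> shift.  For example, with cycles = 1ULL << 60,
 * mult = 1000 and shift = 10, the intermediate product (~2^70) no longer
 * fits in 64 bits, but the result, 1000ULL << 50, does.
 */
static u64 cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
        return mul_u64_u32_shr(cycles, mult, shift);
}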
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 31e95acddb4d..77c60e52939d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -35,6 +35,7 @@ struct memblock_type {
35}; 35};
36 36
37struct memblock { 37struct memblock {
38 bool bottom_up; /* is bottom up direction? */
38 phys_addr_t current_limit; 39 phys_addr_t current_limit;
39 struct memblock_type memory; 40 struct memblock_type memory;
40 struct memblock_type reserved; 41 struct memblock_type reserved;
@@ -148,6 +149,29 @@ phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
148 149
149phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); 150phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
150 151
152#ifdef CONFIG_MOVABLE_NODE
153/*
154 * Set the allocation direction to bottom-up or top-down.
155 */
156static inline void memblock_set_bottom_up(bool enable)
157{
158 memblock.bottom_up = enable;
159}
160
161/*
162 * Check if the allocation direction is bottom-up or not.
163 * if this is true, that said, memblock will allocate memory
164 * in bottom-up direction.
165 */
166static inline bool memblock_bottom_up(void)
167{
168 return memblock.bottom_up;
169}
170#else
171static inline void memblock_set_bottom_up(bool enable) {}
172static inline bool memblock_bottom_up(void) { return false; }
173#endif
174
151/* Flags for memblock_alloc_base() and __memblock_alloc_base() */ 175/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
152#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) 176#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
153#define MEMBLOCK_ALLOC_ACCESSIBLE 0 177#define MEMBLOCK_ALLOC_ACCESSIBLE 0
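Editor's note: memblock_set_bottom_up()/memblock_bottom_up() only change behaviour when CONFIG_MOVABLE_NODE is set; otherwise the stubs preserve the long-standing top-down policy. A hedged early-boot sketch; the wrapper and the 1 MiB allocation are illustrative, not from the patch.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static void __init bottom_up_alloc_example(void)
{
        phys_addr_t addr;

        /* Prefer low addresses until the movable-node layout is decided. */
        memblock_set_bottom_up(true);
        addr = memblock_alloc(SZ_1M, SZ_4K);
        memblock_set_bottom_up(false);

        pr_info("early 1MiB allocation at %pa\n", &addr);
}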
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index dd38e62b84d2..4ca3d951fe91 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -94,6 +94,8 @@ extern void __online_page_set_limits(struct page *page);
94extern void __online_page_increment_counters(struct page *page); 94extern void __online_page_increment_counters(struct page *page);
95extern void __online_page_free(struct page *page); 95extern void __online_page_free(struct page *page);
96 96
97extern int try_online_node(int nid);
98
97#ifdef CONFIG_MEMORY_HOTREMOVE 99#ifdef CONFIG_MEMORY_HOTREMOVE
98extern bool is_pageblock_removable_nolock(struct page *page); 100extern bool is_pageblock_removable_nolock(struct page *page);
99extern int arch_remove_memory(u64 start, u64 size); 101extern int arch_remove_memory(u64 start, u64 size);
@@ -225,6 +227,11 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
225{ 227{
226} 228}
227 229
230static inline int try_online_node(int nid)
231{
232 return 0;
233}
234
228static inline void lock_memory_hotplug(void) {} 235static inline void lock_memory_hotplug(void) {}
229static inline void unlock_memory_hotplug(void) {} 236static inline void unlock_memory_hotplug(void) {}
230 237
@@ -256,14 +263,12 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
256 263
257extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 264extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
258 void *arg, int (*func)(struct memory_block *, void *)); 265 void *arg, int (*func)(struct memory_block *, void *));
259extern int mem_online_node(int nid);
260extern int add_memory(int nid, u64 start, u64 size); 266extern int add_memory(int nid, u64 start, u64 size);
261extern int arch_add_memory(int nid, u64 start, u64 size); 267extern int arch_add_memory(int nid, u64 start, u64 size);
262extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); 268extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
263extern bool is_memblock_offlined(struct memory_block *mem); 269extern bool is_memblock_offlined(struct memory_block *mem);
264extern void remove_memory(int nid, u64 start, u64 size); 270extern void remove_memory(int nid, u64 start, u64 size);
265extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, 271extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
266 int nr_pages);
267extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); 272extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
268extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, 273extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
269 unsigned long pnum); 274 unsigned long pnum);
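Editor's note: try_online_node() replaces mem_online_node(), and the !CONFIG_MEMORY_HOTPLUG stub returning 0 lets callers drop their own #ifdefs. A hedged caller sketch; the wrapper name is illustrative.

#include <linux/memory_hotplug.h>

static int hotplug_add_memory(int nid, u64 start, u64 size)
{
        /* Bring the node itself online first; a no-op stub without hotplug. */
        int ret = try_online_node(nid);

        if (ret)
                return ret;

        return add_memory(nid, start, size);
}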
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index da6716b9e3fe..9fe426b30a41 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -136,6 +136,7 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
136 136
137struct mempolicy *get_vma_policy(struct task_struct *tsk, 137struct mempolicy *get_vma_policy(struct task_struct *tsk,
138 struct vm_area_struct *vma, unsigned long addr); 138 struct vm_area_struct *vma, unsigned long addr);
139bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma);
139 140
140extern void numa_default_policy(void); 141extern void numa_default_policy(void);
141extern void numa_policy_init(void); 142extern void numa_policy_init(void);
@@ -168,7 +169,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
168extern int mpol_parse_str(char *str, struct mempolicy **mpol); 169extern int mpol_parse_str(char *str, struct mempolicy **mpol);
169#endif 170#endif
170 171
171extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); 172extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
172 173
173/* Check if a vma is migratable */ 174/* Check if a vma is migratable */
174static inline int vma_migratable(struct vm_area_struct *vma) 175static inline int vma_migratable(struct vm_area_struct *vma)
@@ -306,9 +307,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
306} 307}
307#endif 308#endif
308 309
309static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 310static inline void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
310{ 311{
311 return 0;
312} 312}
313 313
314static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, 314static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
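Editor's note: mpol_to_str() now returns void on both the real and the stubbed path, so callers stop checking an error code and simply size the buffer generously. A minimal seq_file formatter sketch; show_policy() and the buffer size are hypothetical.

#include <linux/mempolicy.h>
#include <linux/seq_file.h>

static void show_policy(struct seq_file *m, struct mempolicy *pol)
{
        char buf[64];           /* assumed large enough for any policy string */

        mpol_to_str(buf, sizeof(buf), pol);
        seq_printf(m, "policy=%s\n", buf);
}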
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 4706d3d46e56..cb49417f8ba9 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1908,7 +1908,7 @@
1908#define ARIZONA_FLL2_SYNC_GAIN_MASK 0x003c /* FLL2_SYNC_GAIN */ 1908#define ARIZONA_FLL2_SYNC_GAIN_MASK 0x003c /* FLL2_SYNC_GAIN */
1909#define ARIZONA_FLL2_SYNC_GAIN_SHIFT 2 /* FLL2_SYNC_GAIN */ 1909#define ARIZONA_FLL2_SYNC_GAIN_SHIFT 2 /* FLL2_SYNC_GAIN */
1910#define ARIZONA_FLL2_SYNC_GAIN_WIDTH 4 /* FLL2_SYNC_GAIN */ 1910#define ARIZONA_FLL2_SYNC_GAIN_WIDTH 4 /* FLL2_SYNC_GAIN */
1911#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */ 1911#define ARIZONA_FLL2_SYNC_BW 0x0001 /* FLL2_SYNC_BW */
1912#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */ 1912#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */
1913#define ARIZONA_FLL2_SYNC_BW_SHIFT 0 /* FLL2_SYNC_BW */ 1913#define ARIZONA_FLL2_SYNC_BW_SHIFT 0 /* FLL2_SYNC_BW */
1914#define ARIZONA_FLL2_SYNC_BW_WIDTH 1 /* FLL2_SYNC_BW */ 1914#define ARIZONA_FLL2_SYNC_BW_WIDTH 1 /* FLL2_SYNC_BW */
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
new file mode 100644
index 000000000000..16bf8a0dcd97
--- /dev/null
+++ b/include/linux/mfd/as3722.h
@@ -0,0 +1,423 @@
1/*
2 * as3722 definitions
3 *
4 * Copyright (C) 2013 ams
5 * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
6 *
7 * Author: Florian Lobmaier <florian.lobmaier@ams.com>
8 * Author: Laxman Dewangan <ldewangan@nvidia.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25
26#ifndef __LINUX_MFD_AS3722_H__
27#define __LINUX_MFD_AS3722_H__
28
29#include <linux/regmap.h>
30
31/* AS3722 registers */
32#define AS3722_SD0_VOLTAGE_REG 0x00
33#define AS3722_SD1_VOLTAGE_REG 0x01
34#define AS3722_SD2_VOLTAGE_REG 0x02
35#define AS3722_SD3_VOLTAGE_REG 0x03
36#define AS3722_SD4_VOLTAGE_REG 0x04
37#define AS3722_SD5_VOLTAGE_REG 0x05
38#define AS3722_SD6_VOLTAGE_REG 0x06
39#define AS3722_GPIO0_CONTROL_REG 0x08
40#define AS3722_GPIO1_CONTROL_REG 0x09
41#define AS3722_GPIO2_CONTROL_REG 0x0A
42#define AS3722_GPIO3_CONTROL_REG 0x0B
43#define AS3722_GPIO4_CONTROL_REG 0x0C
44#define AS3722_GPIO5_CONTROL_REG 0x0D
45#define AS3722_GPIO6_CONTROL_REG 0x0E
46#define AS3722_GPIO7_CONTROL_REG 0x0F
47#define AS3722_LDO0_VOLTAGE_REG 0x10
48#define AS3722_LDO1_VOLTAGE_REG 0x11
49#define AS3722_LDO2_VOLTAGE_REG 0x12
50#define AS3722_LDO3_VOLTAGE_REG 0x13
51#define AS3722_LDO4_VOLTAGE_REG 0x14
52#define AS3722_LDO5_VOLTAGE_REG 0x15
53#define AS3722_LDO6_VOLTAGE_REG 0x16
54#define AS3722_LDO7_VOLTAGE_REG 0x17
55#define AS3722_LDO9_VOLTAGE_REG 0x19
56#define AS3722_LDO10_VOLTAGE_REG 0x1A
57#define AS3722_LDO11_VOLTAGE_REG 0x1B
58#define AS3722_GPIO_DEB1_REG 0x1E
59#define AS3722_GPIO_DEB2_REG 0x1F
60#define AS3722_GPIO_SIGNAL_OUT_REG 0x20
61#define AS3722_GPIO_SIGNAL_IN_REG 0x21
62#define AS3722_REG_SEQU_MOD1_REG 0x22
63#define AS3722_REG_SEQU_MOD2_REG 0x23
64#define AS3722_REG_SEQU_MOD3_REG 0x24
65#define AS3722_SD_PHSW_CTRL_REG 0x27
66#define AS3722_SD_PHSW_STATUS 0x28
67#define AS3722_SD0_CONTROL_REG 0x29
68#define AS3722_SD1_CONTROL_REG 0x2A
69#define AS3722_SDmph_CONTROL_REG 0x2B
70#define AS3722_SD23_CONTROL_REG 0x2C
71#define AS3722_SD4_CONTROL_REG 0x2D
72#define AS3722_SD5_CONTROL_REG 0x2E
73#define AS3722_SD6_CONTROL_REG 0x2F
74#define AS3722_SD_DVM_REG 0x30
75#define AS3722_RESET_REASON_REG 0x31
76#define AS3722_BATTERY_VOLTAGE_MONITOR_REG 0x32
77#define AS3722_STARTUP_CONTROL_REG 0x33
78#define AS3722_RESET_TIMER_REG 0x34
79#define AS3722_REFERENCE_CONTROL_REG 0x35
80#define AS3722_RESET_CONTROL_REG 0x36
81#define AS3722_OVER_TEMP_CONTROL_REG 0x37
82#define AS3722_WATCHDOG_CONTROL_REG 0x38
83#define AS3722_REG_STANDBY_MOD1_REG 0x39
84#define AS3722_REG_STANDBY_MOD2_REG 0x3A
85#define AS3722_REG_STANDBY_MOD3_REG 0x3B
86#define AS3722_ENABLE_CTRL1_REG 0x3C
87#define AS3722_ENABLE_CTRL2_REG 0x3D
88#define AS3722_ENABLE_CTRL3_REG 0x3E
89#define AS3722_ENABLE_CTRL4_REG 0x3F
90#define AS3722_ENABLE_CTRL5_REG 0x40
91#define AS3722_PWM_CONTROL_L_REG 0x41
92#define AS3722_PWM_CONTROL_H_REG 0x42
93#define AS3722_WATCHDOG_TIMER_REG 0x46
94#define AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG 0x48
95#define AS3722_IOVOLTAGE_REG 0x49
96#define AS3722_BATTERY_VOLTAGE_MONITOR2_REG 0x4A
97#define AS3722_SD_CONTROL_REG 0x4D
98#define AS3722_LDOCONTROL0_REG 0x4E
99#define AS3722_LDOCONTROL1_REG 0x4F
100#define AS3722_SD0_PROTECT_REG 0x50
101#define AS3722_SD6_PROTECT_REG 0x51
102#define AS3722_PWM_VCONTROL1_REG 0x52
103#define AS3722_PWM_VCONTROL2_REG 0x53
104#define AS3722_PWM_VCONTROL3_REG 0x54
105#define AS3722_PWM_VCONTROL4_REG 0x55
106#define AS3722_BB_CHARGER_REG 0x57
107#define AS3722_CTRL_SEQU1_REG 0x58
108#define AS3722_CTRL_SEQU2_REG 0x59
109#define AS3722_OVCURRENT_REG 0x5A
110#define AS3722_OVCURRENT_DEB_REG 0x5B
111#define AS3722_SDLV_DEB_REG 0x5C
112#define AS3722_OC_PG_CTRL_REG 0x5D
113#define AS3722_OC_PG_CTRL2_REG 0x5E
114#define AS3722_CTRL_STATUS 0x5F
115#define AS3722_RTC_CONTROL_REG 0x60
116#define AS3722_RTC_SECOND_REG 0x61
117#define AS3722_RTC_MINUTE_REG 0x62
118#define AS3722_RTC_HOUR_REG 0x63
119#define AS3722_RTC_DAY_REG 0x64
120#define AS3722_RTC_MONTH_REG 0x65
121#define AS3722_RTC_YEAR_REG 0x66
122#define AS3722_RTC_ALARM_SECOND_REG 0x67
123#define AS3722_RTC_ALARM_MINUTE_REG 0x68
124#define AS3722_RTC_ALARM_HOUR_REG 0x69
125#define AS3722_RTC_ALARM_DAY_REG 0x6A
126#define AS3722_RTC_ALARM_MONTH_REG 0x6B
127#define AS3722_RTC_ALARM_YEAR_REG 0x6C
128#define AS3722_SRAM_REG 0x6D
129#define AS3722_RTC_ACCESS_REG 0x6F
130#define AS3722_RTC_STATUS_REG 0x73
131#define AS3722_INTERRUPT_MASK1_REG 0x74
132#define AS3722_INTERRUPT_MASK2_REG 0x75
133#define AS3722_INTERRUPT_MASK3_REG 0x76
134#define AS3722_INTERRUPT_MASK4_REG 0x77
135#define AS3722_INTERRUPT_STATUS1_REG 0x78
136#define AS3722_INTERRUPT_STATUS2_REG 0x79
137#define AS3722_INTERRUPT_STATUS3_REG 0x7A
138#define AS3722_INTERRUPT_STATUS4_REG 0x7B
139#define AS3722_TEMP_STATUS_REG 0x7D
140#define AS3722_ADC0_CONTROL_REG 0x80
141#define AS3722_ADC1_CONTROL_REG 0x81
142#define AS3722_ADC0_MSB_RESULT_REG 0x82
143#define AS3722_ADC0_LSB_RESULT_REG 0x83
144#define AS3722_ADC1_MSB_RESULT_REG 0x84
145#define AS3722_ADC1_LSB_RESULT_REG 0x85
146#define AS3722_ADC1_THRESHOLD_HI_MSB_REG 0x86
147#define AS3722_ADC1_THRESHOLD_HI_LSB_REG 0x87
148#define AS3722_ADC1_THRESHOLD_LO_MSB_REG 0x88
149#define AS3722_ADC1_THRESHOLD_LO_LSB_REG 0x89
150#define AS3722_ADC_CONFIGURATION_REG 0x8A
151#define AS3722_ASIC_ID1_REG 0x90
152#define AS3722_ASIC_ID2_REG 0x91
153#define AS3722_LOCK_REG 0x9E
154#define AS3722_MAX_REGISTER 0xF4
155
156#define AS3722_SD0_EXT_ENABLE_MASK 0x03
157#define AS3722_SD1_EXT_ENABLE_MASK 0x0C
158#define AS3722_SD2_EXT_ENABLE_MASK 0x30
159#define AS3722_SD3_EXT_ENABLE_MASK 0xC0
160#define AS3722_SD4_EXT_ENABLE_MASK 0x03
161#define AS3722_SD5_EXT_ENABLE_MASK 0x0C
162#define AS3722_SD6_EXT_ENABLE_MASK 0x30
163#define AS3722_LDO0_EXT_ENABLE_MASK 0x03
164#define AS3722_LDO1_EXT_ENABLE_MASK 0x0C
165#define AS3722_LDO2_EXT_ENABLE_MASK 0x30
166#define AS3722_LDO3_EXT_ENABLE_MASK 0xC0
167#define AS3722_LDO4_EXT_ENABLE_MASK 0x03
168#define AS3722_LDO5_EXT_ENABLE_MASK 0x0C
169#define AS3722_LDO6_EXT_ENABLE_MASK 0x30
170#define AS3722_LDO7_EXT_ENABLE_MASK 0xC0
171#define AS3722_LDO9_EXT_ENABLE_MASK 0x0C
172#define AS3722_LDO10_EXT_ENABLE_MASK 0x30
173#define AS3722_LDO11_EXT_ENABLE_MASK 0xC0
174
175#define AS3722_OVCURRENT_SD0_ALARM_MASK 0x07
176#define AS3722_OVCURRENT_SD0_ALARM_SHIFT 0x01
177#define AS3722_OVCURRENT_SD0_TRIP_MASK 0x18
178#define AS3722_OVCURRENT_SD0_TRIP_SHIFT 0x03
179#define AS3722_OVCURRENT_SD1_TRIP_MASK 0x60
180#define AS3722_OVCURRENT_SD1_TRIP_SHIFT 0x05
181
182#define AS3722_OVCURRENT_SD6_ALARM_MASK 0x07
183#define AS3722_OVCURRENT_SD6_ALARM_SHIFT 0x01
184#define AS3722_OVCURRENT_SD6_TRIP_MASK 0x18
185#define AS3722_OVCURRENT_SD6_TRIP_SHIFT 0x03
186
187/* AS3722 register bits and bit masks */
188#define AS3722_LDO_ILIMIT_MASK BIT(7)
189#define AS3722_LDO_ILIMIT_BIT BIT(7)
190#define AS3722_LDO0_VSEL_MASK 0x1F
191#define AS3722_LDO0_VSEL_MIN 0x01
192#define AS3722_LDO0_VSEL_MAX 0x12
193#define AS3722_LDO0_NUM_VOLT 0x12
194#define AS3722_LDO3_VSEL_MASK 0x3F
195#define AS3722_LDO3_VSEL_MIN 0x01
196#define AS3722_LDO3_VSEL_MAX 0x2D
197#define AS3722_LDO3_NUM_VOLT 0x2D
198#define AS3722_LDO_VSEL_MASK 0x7F
199#define AS3722_LDO_VSEL_MIN 0x01
200#define AS3722_LDO_VSEL_MAX 0x7F
201#define AS3722_LDO_VSEL_DNU_MIN 0x25
202#define AS3722_LDO_VSEL_DNU_MAX 0x3F
203#define AS3722_LDO_NUM_VOLT 0x80
204
205#define AS3722_LDO0_CTRL BIT(0)
206#define AS3722_LDO1_CTRL BIT(1)
207#define AS3722_LDO2_CTRL BIT(2)
208#define AS3722_LDO3_CTRL BIT(3)
209#define AS3722_LDO4_CTRL BIT(4)
210#define AS3722_LDO5_CTRL BIT(5)
211#define AS3722_LDO6_CTRL BIT(6)
212#define AS3722_LDO7_CTRL BIT(7)
213#define AS3722_LDO9_CTRL BIT(1)
214#define AS3722_LDO10_CTRL BIT(2)
215#define AS3722_LDO11_CTRL BIT(3)
216
217#define AS3722_LDO3_MODE_MASK (3 << 6)
218#define AS3722_LDO3_MODE_VAL(n) (((n) & 0x3) << 6)
219#define AS3722_LDO3_MODE_PMOS AS3722_LDO3_MODE_VAL(0)
220#define AS3722_LDO3_MODE_PMOS_TRACKING AS3722_LDO3_MODE_VAL(1)
221#define AS3722_LDO3_MODE_NMOS AS3722_LDO3_MODE_VAL(2)
222#define AS3722_LDO3_MODE_SWITCH AS3722_LDO3_MODE_VAL(3)
223
224#define AS3722_SD_VSEL_MASK 0x7F
225#define AS3722_SD0_VSEL_MIN 0x01
226#define AS3722_SD0_VSEL_MAX 0x5A
227#define AS3722_SD2_VSEL_MIN 0x01
228#define AS3722_SD2_VSEL_MAX 0x7F
229
230#define AS3722_SDn_CTRL(n) BIT(n)
231
232#define AS3722_SD0_MODE_FAST BIT(4)
233#define AS3722_SD1_MODE_FAST BIT(4)
234#define AS3722_SD2_MODE_FAST BIT(2)
235#define AS3722_SD3_MODE_FAST BIT(6)
236#define AS3722_SD4_MODE_FAST BIT(2)
237#define AS3722_SD5_MODE_FAST BIT(2)
238#define AS3722_SD6_MODE_FAST BIT(4)
239
240#define AS3722_POWER_OFF BIT(1)
241
242#define AS3722_INTERRUPT_MASK1_LID BIT(0)
243#define AS3722_INTERRUPT_MASK1_ACOK BIT(1)
244#define AS3722_INTERRUPT_MASK1_ENABLE1 BIT(2)
245#define AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0 BIT(3)
246#define AS3722_INTERRUPT_MASK1_ONKEY_LONG BIT(4)
247#define AS3722_INTERRUPT_MASK1_ONKEY BIT(5)
248#define AS3722_INTERRUPT_MASK1_OVTMP BIT(6)
249#define AS3722_INTERRUPT_MASK1_LOWBAT BIT(7)
250
251#define AS3722_INTERRUPT_MASK2_SD0_LV BIT(0)
252#define AS3722_INTERRUPT_MASK2_SD1_LV BIT(1)
253#define AS3722_INTERRUPT_MASK2_SD2345_LV BIT(2)
254#define AS3722_INTERRUPT_MASK2_PWM1_OV_PROT BIT(3)
255#define AS3722_INTERRUPT_MASK2_PWM2_OV_PROT BIT(4)
256#define AS3722_INTERRUPT_MASK2_ENABLE2 BIT(5)
257#define AS3722_INTERRUPT_MASK2_SD6_LV BIT(6)
258#define AS3722_INTERRUPT_MASK2_RTC_REP BIT(7)
259
260#define AS3722_INTERRUPT_MASK3_RTC_ALARM BIT(0)
261#define AS3722_INTERRUPT_MASK3_GPIO1 BIT(1)
262#define AS3722_INTERRUPT_MASK3_GPIO2 BIT(2)
263#define AS3722_INTERRUPT_MASK3_GPIO3 BIT(3)
264#define AS3722_INTERRUPT_MASK3_GPIO4 BIT(4)
265#define AS3722_INTERRUPT_MASK3_GPIO5 BIT(5)
266#define AS3722_INTERRUPT_MASK3_WATCHDOG BIT(6)
267#define AS3722_INTERRUPT_MASK3_ENABLE3 BIT(7)
268
269#define AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN BIT(0)
270#define AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN BIT(1)
271#define AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN BIT(2)
272#define AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM BIT(3)
273#define AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM BIT(4)
274#define AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM BIT(5)
275#define AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6 BIT(6)
276#define AS3722_INTERRUPT_MASK4_ADC BIT(7)
277
278#define AS3722_ADC1_INTERVAL_TIME BIT(0)
279#define AS3722_ADC1_INT_MODE_ON BIT(1)
280#define AS3722_ADC_BUF_ON BIT(2)
281#define AS3722_ADC1_LOW_VOLTAGE_RANGE BIT(5)
282#define AS3722_ADC1_INTEVAL_SCAN BIT(6)
283#define AS3722_ADC1_INT_MASK BIT(7)
284
285#define AS3722_ADC_MSB_VAL_MASK 0x7F
286#define AS3722_ADC_LSB_VAL_MASK 0x07
287
288#define AS3722_ADC0_CONV_START BIT(7)
289#define AS3722_ADC0_CONV_NOTREADY BIT(7)
290#define AS3722_ADC0_SOURCE_SELECT_MASK 0x1F
291
292#define AS3722_ADC1_CONV_START BIT(7)
293#define AS3722_ADC1_CONV_NOTREADY BIT(7)
294#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F
295
296/* GPIO modes */
297#define AS3722_GPIO_MODE_MASK 0x07
298#define AS3722_GPIO_MODE_INPUT 0x00
299#define AS3722_GPIO_MODE_OUTPUT_VDDH 0x01
300#define AS3722_GPIO_MODE_IO_OPEN_DRAIN 0x02
301#define AS3722_GPIO_MODE_ADC_IN 0x03
302#define AS3722_GPIO_MODE_INPUT_PULL_UP 0x04
303#define AS3722_GPIO_MODE_INPUT_PULL_DOWN 0x05
304#define AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP 0x06
305#define AS3722_GPIO_MODE_OUTPUT_VDDL 0x07
306#define AS3722_GPIO_MODE_VAL(n) ((n) & AS3722_GPIO_MODE_MASK)
307
308#define AS3722_GPIO_INV BIT(7)
309#define AS3722_GPIO_IOSF_MASK 0x78
310#define AS3722_GPIO_IOSF_VAL(n) (((n) & 0xF) << 3)
311#define AS3722_GPIO_IOSF_NORMAL AS3722_GPIO_IOSF_VAL(0)
312#define AS3722_GPIO_IOSF_INTERRUPT_OUT AS3722_GPIO_IOSF_VAL(1)
313#define AS3722_GPIO_IOSF_VSUP_LOW_OUT AS3722_GPIO_IOSF_VAL(2)
314#define AS3722_GPIO_IOSF_GPIO_INTERRUPT_IN AS3722_GPIO_IOSF_VAL(3)
315#define AS3722_GPIO_IOSF_ISINK_PWM_IN AS3722_GPIO_IOSF_VAL(4)
316#define AS3722_GPIO_IOSF_VOLTAGE_STBY AS3722_GPIO_IOSF_VAL(5)
317#define AS3722_GPIO_IOSF_PWR_GOOD_OUT AS3722_GPIO_IOSF_VAL(7)
318#define AS3722_GPIO_IOSF_Q32K_OUT AS3722_GPIO_IOSF_VAL(8)
319#define AS3722_GPIO_IOSF_WATCHDOG_IN AS3722_GPIO_IOSF_VAL(9)
320#define AS3722_GPIO_IOSF_SOFT_RESET_IN AS3722_GPIO_IOSF_VAL(11)
321#define AS3722_GPIO_IOSF_PWM_OUT AS3722_GPIO_IOSF_VAL(12)
322#define AS3722_GPIO_IOSF_VSUP_LOW_DEB_OUT AS3722_GPIO_IOSF_VAL(13)
323#define AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW AS3722_GPIO_IOSF_VAL(14)
324
325#define AS3722_GPIOn_SIGNAL(n) BIT(n)
326#define AS3722_GPIOn_CONTROL_REG(n) (AS3722_GPIO0_CONTROL_REG + n)
327#define AS3722_I2C_PULL_UP BIT(4)
328#define AS3722_INT_PULL_UP BIT(5)
329
330#define AS3722_RTC_REP_WAKEUP_EN BIT(0)
331#define AS3722_RTC_ALARM_WAKEUP_EN BIT(1)
332#define AS3722_RTC_ON BIT(2)
333#define AS3722_RTC_IRQMODE BIT(3)
334#define AS3722_RTC_CLK32K_OUT_EN BIT(5)
335
336#define AS3722_WATCHDOG_TIMER_MAX 0x7F
337#define AS3722_WATCHDOG_ON BIT(0)
338#define AS3722_WATCHDOG_SW_SIG BIT(0)
339
340#define AS3722_EXT_CONTROL_ENABLE1 0x1
341#define AS3722_EXT_CONTROL_ENABLE2 0x2
342#define AS3722_EXT_CONTROL_ENABLE3 0x3
343
344/* Interrupt IDs */
345enum as3722_irq {
346 AS3722_IRQ_LID,
347 AS3722_IRQ_ACOK,
348 AS3722_IRQ_ENABLE1,
349 AS3722_IRQ_OCCUR_ALARM_SD0,
350 AS3722_IRQ_ONKEY_LONG_PRESS,
351 AS3722_IRQ_ONKEY,
352 AS3722_IRQ_OVTMP,
353 AS3722_IRQ_LOWBAT,
354 AS3722_IRQ_SD0_LV,
355 AS3722_IRQ_SD1_LV,
356 AS3722_IRQ_SD2_LV,
357 AS3722_IRQ_PWM1_OV_PROT,
358 AS3722_IRQ_PWM2_OV_PROT,
359 AS3722_IRQ_ENABLE2,
360 AS3722_IRQ_SD6_LV,
361 AS3722_IRQ_RTC_REP,
362 AS3722_IRQ_RTC_ALARM,
363 AS3722_IRQ_GPIO1,
364 AS3722_IRQ_GPIO2,
365 AS3722_IRQ_GPIO3,
366 AS3722_IRQ_GPIO4,
367 AS3722_IRQ_GPIO5,
368 AS3722_IRQ_WATCHDOG,
369 AS3722_IRQ_ENABLE3,
370 AS3722_IRQ_TEMP_SD0_SHUTDOWN,
371 AS3722_IRQ_TEMP_SD1_SHUTDOWN,
372 AS3722_IRQ_TEMP_SD2_SHUTDOWN,
373 AS3722_IRQ_TEMP_SD0_ALARM,
374 AS3722_IRQ_TEMP_SD1_ALARM,
375 AS3722_IRQ_TEMP_SD6_ALARM,
376 AS3722_IRQ_OCCUR_ALARM_SD6,
377 AS3722_IRQ_ADC,
378 AS3722_IRQ_MAX,
379};
380
381struct as3722 {
382 struct device *dev;
383 struct regmap *regmap;
384 int chip_irq;
385 unsigned long irq_flags;
386 bool en_intern_int_pullup;
387 bool en_intern_i2c_pullup;
388 struct regmap_irq_chip_data *irq_data;
389};
390
391static inline int as3722_read(struct as3722 *as3722, u32 reg, u32 *dest)
392{
393 return regmap_read(as3722->regmap, reg, dest);
394}
395
396static inline int as3722_write(struct as3722 *as3722, u32 reg, u32 value)
397{
398 return regmap_write(as3722->regmap, reg, value);
399}
400
401static inline int as3722_block_read(struct as3722 *as3722, u32 reg,
402 int count, u8 *buf)
403{
404 return regmap_bulk_read(as3722->regmap, reg, buf, count);
405}
406
407static inline int as3722_block_write(struct as3722 *as3722, u32 reg,
408 int count, u8 *data)
409{
410 return regmap_bulk_write(as3722->regmap, reg, data, count);
411}
412
413static inline int as3722_update_bits(struct as3722 *as3722, u32 reg,
414 u32 mask, u8 val)
415{
416 return regmap_update_bits(as3722->regmap, reg, mask, val);
417}
418
419static inline int as3722_irq_get_virq(struct as3722 *as3722, int irq)
420{
421 return regmap_irq_get_virq(as3722->irq_data, irq);
422}
423#endif /* __LINUX_MFD_AS3722_H__ */
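Editor's note: the new as3722.h ends with thin regmap wrappers; sub-drivers (regulator, RTC, GPIO, ADC) are expected to go through them rather than touch the regmap directly. A hedged sketch using only symbols defined in the header above; the helper name is illustrative.

#include <linux/mfd/as3722.h>

/* Enable the 32 kHz clock output by setting one bit in RTC_CONTROL. */
static int as3722_enable_clk32k(struct as3722 *as3722)
{
        return as3722_update_bits(as3722, AS3722_RTC_CONTROL_REG,
                                  AS3722_RTC_CLK32K_OUT_EN,
                                  AS3722_RTC_CLK32K_OUT_EN);
}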
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index cebe97ee98b8..bdba8c61207b 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -59,6 +59,12 @@ struct mfd_cell {
59 * pm_runtime_no_callbacks(). 59 * pm_runtime_no_callbacks().
60 */ 60 */
61 bool pm_runtime_no_callbacks; 61 bool pm_runtime_no_callbacks;
62
63 /* A list of regulator supplies that should be mapped to the MFD
64 * device rather than the child device when requested
65 */
66 const char **parent_supplies;
67 int num_parent_supplies;
62}; 68};
63 69
64/* 70/*
@@ -98,7 +104,7 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
98} 104}
99 105
100extern int mfd_add_devices(struct device *parent, int id, 106extern int mfd_add_devices(struct device *parent, int id,
101 struct mfd_cell *cells, int n_devs, 107 const struct mfd_cell *cells, int n_devs,
102 struct resource *mem_base, 108 struct resource *mem_base,
103 int irq_base, struct irq_domain *irq_domain); 109 int irq_base, struct irq_domain *irq_domain);
104 110
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 786d02eb79d2..21e21b81cc75 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -148,10 +148,15 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
148 unsigned reg_cnt, unsigned char *val) 148 unsigned reg_cnt, unsigned char *val)
149{ 149{
150 int ret; 150 int ret;
151 unsigned int tmp;
152 int i;
151 153
152 ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); 154 for (i = 0; i < reg_cnt; i++) {
153 if (ret < 0) 155 ret = regmap_read(da9052->regmap, reg + i, &tmp);
154 return ret; 156 val[i] = (unsigned char)tmp;
157 if (ret < 0)
158 return ret;
159 }
155 160
156 if (da9052->fix_io) { 161 if (da9052->fix_io) {
157 ret = da9052->fix_io(da9052, reg); 162 ret = da9052->fix_io(da9052, reg);
@@ -166,10 +171,13 @@ static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
166 unsigned reg_cnt, unsigned char *val) 171 unsigned reg_cnt, unsigned char *val)
167{ 172{
168 int ret; 173 int ret;
174 int i;
169 175
170 ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt); 176 for (i = 0; i < reg_cnt; i++) {
171 if (ret < 0) 177 ret = regmap_write(da9052->regmap, reg + i, val[i]);
172 return ret; 178 if (ret < 0)
179 return ret;
180 }
173 181
174 if (da9052->fix_io) { 182 if (da9052->fix_io) {
175 ret = da9052->fix_io(da9052, reg); 183 ret = da9052->fix_io(da9052, reg);
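Editor's note: da9052_group_read()/da9052_group_write() keep their calling convention but now issue one regmap access per register instead of a single bulk transfer. A hedged caller sketch; DA9052_EVENT_A_REG is assumed (from da9052/reg.h) to name the first of a contiguous block of four event registers.

#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>

/* Read the four event registers in one call, as before the change. */
static int da9052_read_events(struct da9052 *da9052, unsigned char *buf)
{
        return da9052_group_read(da9052, DA9052_EVENT_A_REG, 4, buf);
}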
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index ca0790fba2f5..060e11256fbc 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -12,6 +12,8 @@
12#include <linux/notifier.h> 12#include <linux/notifier.h>
13#include <linux/err.h> 13#include <linux/err.h>
14 14
15#include <dt-bindings/mfd/dbx500-prcmu.h> /* For clock identifiers */
16
15/* Offset for the firmware version within the TCPM */ 17/* Offset for the firmware version within the TCPM */
16#define DB8500_PRCMU_FW_VERSION_OFFSET 0xA4 18#define DB8500_PRCMU_FW_VERSION_OFFSET 0xA4
17#define DBX540_PRCMU_FW_VERSION_OFFSET 0xA8 19#define DBX540_PRCMU_FW_VERSION_OFFSET 0xA8
@@ -94,74 +96,6 @@ enum prcmu_wakeup_index {
94#define PRCMU_CLKSRC_ARMCLKFIX 0x46 96#define PRCMU_CLKSRC_ARMCLKFIX 0x46
95#define PRCMU_CLKSRC_HDMICLK 0x47 97#define PRCMU_CLKSRC_HDMICLK 0x47
96 98
97/*
98 * Clock identifiers.
99 */
100enum prcmu_clock {
101 PRCMU_SGACLK,
102 PRCMU_UARTCLK,
103 PRCMU_MSP02CLK,
104 PRCMU_MSP1CLK,
105 PRCMU_I2CCLK,
106 PRCMU_SDMMCCLK,
107 PRCMU_SPARE1CLK,
108 PRCMU_SLIMCLK,
109 PRCMU_PER1CLK,
110 PRCMU_PER2CLK,
111 PRCMU_PER3CLK,
112 PRCMU_PER5CLK,
113 PRCMU_PER6CLK,
114 PRCMU_PER7CLK,
115 PRCMU_LCDCLK,
116 PRCMU_BMLCLK,
117 PRCMU_HSITXCLK,
118 PRCMU_HSIRXCLK,
119 PRCMU_HDMICLK,
120 PRCMU_APEATCLK,
121 PRCMU_APETRACECLK,
122 PRCMU_MCDECLK,
123 PRCMU_IPI2CCLK,
124 PRCMU_DSIALTCLK,
125 PRCMU_DMACLK,
126 PRCMU_B2R2CLK,
127 PRCMU_TVCLK,
128 PRCMU_SSPCLK,
129 PRCMU_RNGCLK,
130 PRCMU_UICCCLK,
131 PRCMU_PWMCLK,
132 PRCMU_IRDACLK,
133 PRCMU_IRRCCLK,
134 PRCMU_SIACLK,
135 PRCMU_SVACLK,
136 PRCMU_ACLK,
137 PRCMU_HVACLK, /* Ux540 only */
138 PRCMU_G1CLK, /* Ux540 only */
139 PRCMU_SDMMCHCLK,
140 PRCMU_CAMCLK,
141 PRCMU_BML8580CLK,
142 PRCMU_NUM_REG_CLOCKS,
143 PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS,
144 PRCMU_CDCLK,
145 PRCMU_TIMCLK,
146 PRCMU_PLLSOC0,
147 PRCMU_PLLSOC1,
148 PRCMU_ARMSS,
149 PRCMU_PLLDDR,
150 PRCMU_PLLDSI,
151 PRCMU_DSI0CLK,
152 PRCMU_DSI1CLK,
153 PRCMU_DSI0ESCCLK,
154 PRCMU_DSI1ESCCLK,
155 PRCMU_DSI2ESCCLK,
156 /* LCD DSI PLL - Ux540 only */
157 PRCMU_PLLDSI_LCD,
158 PRCMU_DSI0CLK_LCD,
159 PRCMU_DSI1CLK_LCD,
160 PRCMU_DSI0ESCCLK_LCD,
161 PRCMU_DSI1ESCCLK_LCD,
162 PRCMU_DSI2ESCCLK_LCD,
163};
164
165/** 99/**
166 * enum prcmu_wdog_id - PRCMU watchdog IDs 100 * enum prcmu_wdog_id - PRCMU watchdog IDs
167 * @PRCMU_WDOG_ALL: use all timers 101 * @PRCMU_WDOG_ALL: use all timers
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 244fb0d51589..3e050b933dd0 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -323,7 +323,6 @@ struct max77693_dev {
323 323
324 int irq; 324 int irq;
325 int irq_gpio; 325 int irq_gpio;
326 bool wakeup;
327 struct mutex irqlock; 326 struct mutex irqlock;
328 int irq_masks_cur[MAX77693_IRQ_GROUP_NR]; 327 int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
329 int irq_masks_cache[MAX77693_IRQ_GROUP_NR]; 328 int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 676f0f388992..3f3dc45f93ee 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -64,8 +64,6 @@ struct max77693_muic_platform_data {
64}; 64};
65 65
66struct max77693_platform_data { 66struct max77693_platform_data {
67 int wakeup;
68
69 /* regulator data */ 67 /* regulator data */
70 struct max77693_regulator_data *regulators; 68 struct max77693_regulator_data *regulators;
71 int num_regulators; 69 int num_regulators;
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 41ed59276c00..67c17b5a6f44 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -41,6 +41,13 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
41 unsigned int mode, unsigned int channel, 41 unsigned int mode, unsigned int channel,
42 u8 ato, bool atox, unsigned int *sample); 42 u8 ato, bool atox, unsigned int *sample);
43 43
44#define MC13783_AUDIO_RX0 36
45#define MC13783_AUDIO_RX1 37
46#define MC13783_AUDIO_TX 38
47#define MC13783_SSI_NETWORK 39
48#define MC13783_AUDIO_CODEC 40
49#define MC13783_AUDIO_DAC 41
50
44#define MC13XXX_IRQ_ADCDONE 0 51#define MC13XXX_IRQ_ADCDONE 0
45#define MC13XXX_IRQ_ADCBISDONE 1 52#define MC13XXX_IRQ_ADCBISDONE 1
46#define MC13XXX_IRQ_TS 2 53#define MC13XXX_IRQ_TS 2
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index d1382dfbeff0..0ce772105508 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -756,6 +756,59 @@
756#define PCR_SETTING_REG2 0x814 756#define PCR_SETTING_REG2 0x814
757#define PCR_SETTING_REG3 0x747 757#define PCR_SETTING_REG3 0x747
758 758
759/* Phy bits */
760#define PHY_PCR_FORCE_CODE 0xB000
761#define PHY_PCR_OOBS_CALI_50 0x0800
762#define PHY_PCR_OOBS_VCM_08 0x0200
763#define PHY_PCR_OOBS_SEN_90 0x0040
764#define PHY_PCR_RSSI_EN 0x0002
765
766#define PHY_RCR1_ADP_TIME 0x0100
767#define PHY_RCR1_VCO_COARSE 0x001F
768
769#define PHY_RCR2_EMPHASE_EN 0x8000
770#define PHY_RCR2_NADJR 0x4000
771#define PHY_RCR2_CDR_CP_10 0x0400
772#define PHY_RCR2_CDR_SR_2 0x0100
773#define PHY_RCR2_FREQSEL_12 0x0040
774#define PHY_RCR2_CPADJEN 0x0020
775#define PHY_RCR2_CDR_SC_8 0x0008
776#define PHY_RCR2_CALIB_LATE 0x0002
777
778#define PHY_RDR_RXDSEL_1_9 0x4000
779
780#define PHY_TUNE_TUNEREF_1_0 0x4000
781#define PHY_TUNE_VBGSEL_1252 0x0C00
782#define PHY_TUNE_SDBUS_33 0x0200
783#define PHY_TUNE_TUNED18 0x01C0
784#define PHY_TUNE_TUNED12 0X0020
785
786#define PHY_BPCR_IBRXSEL 0x0400
787#define PHY_BPCR_IBTXSEL 0x0100
788#define PHY_BPCR_IB_FILTER 0x0080
789#define PHY_BPCR_CMIRROR_EN 0x0040
790
791#define PHY_REG_REV_RESV 0xE000
792#define PHY_REG_REV_RXIDLE_LATCHED 0x1000
793#define PHY_REG_REV_P1_EN 0x0800
794#define PHY_REG_REV_RXIDLE_EN 0x0400
795#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040
796#define PHY_REG_REV_STOP_CLKRD 0x0020
797#define PHY_REG_REV_RX_PWST 0x0008
798#define PHY_REG_REV_STOP_CLKWR 0x0004
799
800#define PHY_FLD3_TIMER_4 0x7800
801#define PHY_FLD3_TIMER_6 0x00E0
802#define PHY_FLD3_RXDELINK 0x0004
803
804#define PHY_FLD4_FLDEN_SEL 0x4000
805#define PHY_FLD4_REQ_REF 0x2000
806#define PHY_FLD4_RXAMP_OFF 0x1000
807#define PHY_FLD4_REQ_ADDA 0x0800
808#define PHY_FLD4_BER_COUNT 0x00E0
809#define PHY_FLD4_BER_TIMER 0x000A
810#define PHY_FLD4_BER_CHK_EN 0x0001
811
759#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0) 812#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
760 813
761struct rtsx_pcr; 814struct rtsx_pcr;
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 378ae8a04c6a..cab2dd279076 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,7 +39,8 @@ enum sec_device_type {
39struct sec_pmic_dev { 39struct sec_pmic_dev {
40 struct device *dev; 40 struct device *dev;
41 struct sec_platform_data *pdata; 41 struct sec_platform_data *pdata;
42 struct regmap *regmap; 42 struct regmap *regmap_pmic;
43 struct regmap *regmap_rtc;
43 struct i2c_client *i2c; 44 struct i2c_client *i2c;
44 struct i2c_client *rtc; 45 struct i2c_client *rtc;
45 46
@@ -51,6 +52,7 @@ struct sec_pmic_dev {
51 int ono; 52 int ono;
52 int type; 53 int type;
53 bool wakeup; 54 bool wakeup;
55 bool wtsr_smpl;
54}; 56};
55 57
56int sec_irq_init(struct sec_pmic_dev *sec_pmic); 58int sec_irq_init(struct sec_pmic_dev *sec_pmic);
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 71597e20cddb..94b7cd6d8891 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -62,6 +62,11 @@ enum sec_rtc_reg {
62/* RTC Update Register1 */ 62/* RTC Update Register1 */
63#define RTC_UDR_SHIFT 0 63#define RTC_UDR_SHIFT 0
64#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT) 64#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
65#define RTC_TCON_SHIFT 1
66#define RTC_TCON_MASK (1 << RTC_TCON_SHIFT)
67#define RTC_TIME_EN_SHIFT 3
68#define RTC_TIME_EN_MASK (1 << RTC_TIME_EN_SHIFT)
69
65/* RTC Hour register */ 70/* RTC Hour register */
66#define HOUR_PM_SHIFT 6 71#define HOUR_PM_SHIFT 6
67#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT) 72#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
@@ -69,6 +74,12 @@ enum sec_rtc_reg {
69#define ALARM_ENABLE_SHIFT 7 74#define ALARM_ENABLE_SHIFT 7
70#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT) 75#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
71 76
77#define SMPL_ENABLE_SHIFT 7
78#define SMPL_ENABLE_MASK (1 << SMPL_ENABLE_SHIFT)
79
80#define WTSR_ENABLE_SHIFT 6
81#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
82
72enum { 83enum {
73 RTC_SEC = 0, 84 RTC_SEC = 0,
74 RTC_MIN, 85 RTC_MIN,
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index ba89b94e4a56..674b45d5a757 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -316,7 +316,7 @@ enum si476x_smoothmetrics {
316 * response to 'FM_RD_STATUS' command 316 * response to 'FM_RD_STATUS' command
317 * @rdstpptyint: Traffic program flag(TP) and/or program type(PTY) 317 * @rdstpptyint: Traffic program flag(TP) and/or program type(PTY)
318 * code has changed. 318 * code has changed.
319 * @rdspiint: Program indentifiaction(PI) code has changed. 319 * @rdspiint: Program identification(PI) code has changed.
320 * @rdssyncint: RDS synchronization has changed. 320 * @rdssyncint: RDS synchronization has changed.
321 * @rdsfifoint: RDS was received and the RDS FIFO has at least 321 * @rdsfifoint: RDS was received and the RDS FIFO has at least
322 * 'FM_RDS_INTERRUPT_FIFO_COUNT' elements in it. 322 * 'FM_RDS_INTERRUPT_FIFO_COUNT' elements in it.
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
new file mode 100644
index 000000000000..eda121556e5d
--- /dev/null
+++ b/include/linux/mfd/stw481x.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2011 ST-Ericsson SA
3 * Written on behalf of Linaro for ST-Ericsson
4 *
5 * Author: Linus Walleij <linus.walleij@linaro.org>
6 *
7 * License terms: GNU General Public License (GPL) version 2
8 */
9#ifndef MFD_STW481X_H
10#define MFD_STW481X_H
11
12#include <linux/i2c.h>
13#include <linux/regulator/machine.h>
14#include <linux/regmap.h>
15#include <linux/bitops.h>
16
17/* These registers are accessed from more than one driver */
18#define STW_CONF1 0x11U
19#define STW_CONF1_PDN_VMMC 0x01U
20#define STW_CONF1_VMMC_MASK 0x0eU
21#define STW_CONF1_VMMC_1_8V 0x02U
22#define STW_CONF1_VMMC_2_85V 0x04U
23#define STW_CONF1_VMMC_3V 0x06U
24#define STW_CONF1_VMMC_1_85V 0x08U
25#define STW_CONF1_VMMC_2_6V 0x0aU
26#define STW_CONF1_VMMC_2_7V 0x0cU
27#define STW_CONF1_VMMC_3_3V 0x0eU
28#define STW_CONF1_MMC_LS_STATUS 0x10U
29#define STW_PCTL_REG_LO 0x1eU
30#define STW_PCTL_REG_HI 0x1fU
31#define STW_CONF1_V_MONITORING 0x20U
32#define STW_CONF1_IT_WARN 0x40U
33#define STW_CONF1_PDN_VAUX 0x80U
34#define STW_CONF2 0x20U
35#define STW_CONF2_MASK_TWARN 0x01U
36#define STW_CONF2_VMMC_EXT 0x02U
37#define STW_CONF2_MASK_IT_WAKE_UP 0x04U
38#define STW_CONF2_GPO1 0x08U
39#define STW_CONF2_GPO2 0x10U
40#define STW_VCORE_SLEEP 0x21U
41
42/**
43 * struct stw481x - state holder for the Stw481x drivers
44 * @lock: mutex to serialize I2C accesses
45 * @client: corresponding I2C client
46 * @vmmc_regulator: regulator device for regulator children
47 * @map: regmap handle to access device registers
48 */
49struct stw481x {
50 struct mutex lock;
51 struct i2c_client *client;
52 struct regulator_dev *vmmc_regulator;
53 struct regmap *map;
54};
55
56#endif
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index b473577f36db..8789fa3c7fd9 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -17,10 +17,35 @@
17 17
18struct device_node; 18struct device_node;
19 19
20#ifdef CONFIG_MFD_SYSCON
20extern struct regmap *syscon_node_to_regmap(struct device_node *np); 21extern struct regmap *syscon_node_to_regmap(struct device_node *np);
21extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); 22extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
22extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s); 23extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s);
23extern struct regmap *syscon_regmap_lookup_by_phandle( 24extern struct regmap *syscon_regmap_lookup_by_phandle(
24 struct device_node *np, 25 struct device_node *np,
25 const char *property); 26 const char *property);
27#else
28static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
29{
30 return ERR_PTR(-ENOSYS);
31}
32
33static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
34{
35 return ERR_PTR(-ENOSYS);
36}
37
38static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
39{
40 return ERR_PTR(-ENOSYS);
41}
42
43static inline struct regmap *syscon_regmap_lookup_by_phandle(
44 struct device_node *np,
45 const char *property)
46{
47 return ERR_PTR(-ENOSYS);
48}
49#endif
50
26#endif /* __LINUX_MFD_SYSCON_H__ */ 51#endif /* __LINUX_MFD_SYSCON_H__ */
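Editor's note: the syscon.h hunk adds ERR_PTR(-ENOSYS) stubs for !CONFIG_MFD_SYSCON, so consumers can call the lookup helpers unconditionally and only test IS_ERR(). A hedged consumer sketch; the "foo,gpr" property, register offset and bit are placeholders.

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int foo_setup(struct device_node *np)
{
        struct regmap *gpr;

        gpr = syscon_regmap_lookup_by_phandle(np, "foo,gpr");
        if (IS_ERR(gpr))        /* -ENOSYS when syscon is not built in */
                return PTR_ERR(gpr);

        /* Flip one bit in a hypothetical general-purpose register. */
        return regmap_update_bits(gpr, 0x04, BIT(0), BIT(0));
}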
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index b6bdcd66c07d..b6d36b38b99c 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -241,6 +241,12 @@
241 241
242#define IMX6Q_GPR5_L2_CLK_STOP BIT(8) 242#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
243 243
244#define IMX6Q_GPR8_TX_SWING_LOW (0x7f << 25)
245#define IMX6Q_GPR8_TX_SWING_FULL (0x7f << 18)
246#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB (0x3f << 12)
247#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB (0x3f << 6)
248#define IMX6Q_GPR8_TX_DEEMPH_GEN1 (0x3f << 0)
249
244#define IMX6Q_GPR9_TZASC2_BYP BIT(1) 250#define IMX6Q_GPR9_TZASC2_BYP BIT(1)
245#define IMX6Q_GPR9_TZASC1_BYP BIT(0) 251#define IMX6Q_GPR9_TZASC1_BYP BIT(0)
246 252
@@ -273,7 +279,9 @@
273#define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26) 279#define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26)
274#define IMX6Q_GPR12_ARMP_ATB_CLK_EN BIT(25) 280#define IMX6Q_GPR12_ARMP_ATB_CLK_EN BIT(25)
275#define IMX6Q_GPR12_ARMP_APB_CLK_EN BIT(24) 281#define IMX6Q_GPR12_ARMP_APB_CLK_EN BIT(24)
282#define IMX6Q_GPR12_DEVICE_TYPE (0xf << 12)
276#define IMX6Q_GPR12_PCIE_CTL_2 BIT(10) 283#define IMX6Q_GPR12_PCIE_CTL_2 BIT(10)
284#define IMX6Q_GPR12_LOS_LEVEL (0x1f << 4)
277 285
278#define IMX6Q_GPR13_SDMA_STOP_REQ BIT(30) 286#define IMX6Q_GPR13_SDMA_STOP_REQ BIT(30)
279#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29) 287#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29)
@@ -363,4 +371,9 @@
363#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2) 371#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2)
364#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1) 372#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1)
365#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0) 373#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0)
374
375/* For imx6sl iomux gpr register field define */
376#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
377#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
378
366#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ 379#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 25f2c611ab01..d498d98f0c2c 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -46,16 +46,24 @@
46/* Step Enable */ 46/* Step Enable */
47#define STEPENB_MASK (0x1FFFF << 0) 47#define STEPENB_MASK (0x1FFFF << 0)
48#define STEPENB(val) ((val) << 0) 48#define STEPENB(val) ((val) << 0)
49#define ENB(val) (1 << (val))
50#define STPENB_STEPENB STEPENB(0x1FFFF)
51#define STPENB_STEPENB_TC STEPENB(0x1FFF)
49 52
50/* IRQ enable */ 53/* IRQ enable */
51#define IRQENB_HW_PEN BIT(0) 54#define IRQENB_HW_PEN BIT(0)
52#define IRQENB_FIFO0THRES BIT(2) 55#define IRQENB_FIFO0THRES BIT(2)
56#define IRQENB_FIFO0OVRRUN BIT(3)
57#define IRQENB_FIFO0UNDRFLW BIT(4)
53#define IRQENB_FIFO1THRES BIT(5) 58#define IRQENB_FIFO1THRES BIT(5)
59#define IRQENB_FIFO1OVRRUN BIT(6)
60#define IRQENB_FIFO1UNDRFLW BIT(7)
54#define IRQENB_PENUP BIT(9) 61#define IRQENB_PENUP BIT(9)
55 62
56/* Step Configuration */ 63/* Step Configuration */
57#define STEPCONFIG_MODE_MASK (3 << 0) 64#define STEPCONFIG_MODE_MASK (3 << 0)
58#define STEPCONFIG_MODE(val) ((val) << 0) 65#define STEPCONFIG_MODE(val) ((val) << 0)
66#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
59#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2) 67#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
60#define STEPCONFIG_AVG_MASK (7 << 2) 68#define STEPCONFIG_AVG_MASK (7 << 2)
61#define STEPCONFIG_AVG(val) ((val) << 2) 69#define STEPCONFIG_AVG(val) ((val) << 2)
@@ -123,15 +131,21 @@
123#define ADC_CLK 3000000 131#define ADC_CLK 3000000
124#define TOTAL_STEPS 16 132#define TOTAL_STEPS 16
125#define TOTAL_CHANNELS 8 133#define TOTAL_CHANNELS 8
134#define FIFO1_THRESHOLD 19
126 135
127/* 136/*
128* ADC runs at 3MHz, and it takes 137 * time in us for processing a single channel, calculated as follows:
129* 15 cycles to latch one data output. 138 *
130* Hence the idle time for ADC to 139 * num cycles = open delay + (sample delay + conv time) * averaging
131* process one sample data would be 140 *
132* around 5 micro seconds. 141 * num cycles: 152 + (1 + 13) * 16 = 376
133*/ 142 *
134#define IDLE_TIMEOUT 5 /* microsec */ 143 * clock frequency: 26MHz / 8 = 3.25MHz
144 * clock period: 1 / 3.25MHz = 308ns
145 *
146 * processing time: 376 * 308ns = 116us
147 */
148#define IDLE_TIMEOUT 116 /* microsec */
135 149
136#define TSCADC_CELLS 2 150#define TSCADC_CELLS 2
137 151
@@ -146,6 +160,7 @@ struct ti_tscadc_dev {
146 struct mfd_cell cells[TSCADC_CELLS]; 160 struct mfd_cell cells[TSCADC_CELLS];
147 u32 reg_se_cache; 161 u32 reg_se_cache;
148 spinlock_t reg_lock; 162 spinlock_t reg_lock;
163 unsigned int clk_div;
149 164
150 /* tsc device */ 165 /* tsc device */
151 struct titsc *tsc; 166 struct titsc *tsc;
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 40854ac0ba3d..eefafa62d304 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -56,8 +56,6 @@ struct irq_domain;
56#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN) 56#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
57 57
58struct wm8994 { 58struct wm8994 {
59 struct mutex irq_lock;
60
61 struct wm8994_pdata pdata; 59 struct wm8994_pdata pdata;
62 60
63 enum wm8994_type type; 61 enum wm8994_type type;
@@ -85,16 +83,43 @@ struct wm8994 {
85}; 83};
86 84
87/* Device I/O API */ 85/* Device I/O API */
88int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg);
89int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
90 unsigned short val);
91int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
92 unsigned short mask, unsigned short val);
93int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
94 int count, u16 *buf);
95int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
96 int count, const u16 *buf);
97 86
87static inline int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
88{
89 unsigned int val;
90 int ret;
91
92 ret = regmap_read(wm8994->regmap, reg, &val);
93
94 if (ret < 0)
95 return ret;
96 else
97 return val;
98}
99
100static inline int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
101 unsigned short val)
102{
103 return regmap_write(wm8994->regmap, reg, val);
104}
105
106static inline int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
107 int count, u16 *buf)
108{
109 return regmap_bulk_read(wm8994->regmap, reg, buf, count);
110}
111
112static inline int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
113 int count, const u16 *buf)
114{
115 return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
116}
117
118static inline int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
119 unsigned short mask, unsigned short val)
120{
121 return regmap_update_bits(wm8994->regmap, reg, mask, val);
122}
98 123
99/* Helper to save on boilerplate */ 124/* Helper to save on boilerplate */
100static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq, 125static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq,
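Editor's note: the exported wm8994_reg_read()/_write()/_bulk_*()/_set_bits() functions become static inline regmap wrappers with the same semantics (the read helper still returns the register value or a negative error). A minimal caller sketch; only WM8994_SOFTWARE_RESET from wm8994/registers.h is assumed.

#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/registers.h>

/* Register 0x00 doubles as the chip-ID register on these devices. */
static int wm8994_read_chip_id(struct wm8994 *wm8994)
{
        return wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
}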
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index ad05ce60c1c9..2e5b194b9b19 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -22,6 +22,8 @@
22#define PHY_ID_KSZ8021 0x00221555 22#define PHY_ID_KSZ8021 0x00221555
23#define PHY_ID_KSZ8031 0x00221556 23#define PHY_ID_KSZ8031 0x00221556
24#define PHY_ID_KSZ8041 0x00221510 24#define PHY_ID_KSZ8041 0x00221510
25/* undocumented */
26#define PHY_ID_KSZ8041RNLI 0x00221537
25#define PHY_ID_KSZ8051 0x00221550 27#define PHY_ID_KSZ8051 0x00221550
26/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ 28/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
27#define PHY_ID_KSZ8001 0x0022161A 29#define PHY_ID_KSZ8001 0x0022161A
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 8d3c57fdf221..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
55 struct page *newpage, struct page *page); 55 struct page *newpage, struct page *page);
56extern int migrate_page_move_mapping(struct address_space *mapping, 56extern int migrate_page_move_mapping(struct address_space *mapping,
57 struct page *newpage, struct page *page, 57 struct page *newpage, struct page *page,
58 struct buffer_head *head, enum migrate_mode mode); 58 struct buffer_head *head, enum migrate_mode mode,
59 int extra_count);
59#else 60#else
60 61
61static inline void putback_lru_pages(struct list_head *l) {} 62static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,11 +91,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
90#endif /* CONFIG_MIGRATION */ 91#endif /* CONFIG_MIGRATION */
91 92
92#ifdef CONFIG_NUMA_BALANCING 93#ifdef CONFIG_NUMA_BALANCING
93extern int migrate_misplaced_page(struct page *page, int node); 94extern bool pmd_trans_migrating(pmd_t pmd);
94extern int migrate_misplaced_page(struct page *page, int node); 95extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
96extern int migrate_misplaced_page(struct page *page,
97 struct vm_area_struct *vma, int node);
95extern bool migrate_ratelimited(int node); 98extern bool migrate_ratelimited(int node);
96#else 99#else
97static inline int migrate_misplaced_page(struct page *page, int node) 100static inline bool pmd_trans_migrating(pmd_t pmd)
101{
102 return false;
103}
104static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
105{
106}
107static inline int migrate_misplaced_page(struct page *page,
108 struct vm_area_struct *vma, int node)
98{ 109{
99 return -EAGAIN; /* can't migrate now */ 110 return -EAGAIN; /* can't migrate now */
100} 111}
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index cb358355ef43..f7eaf2d60083 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -31,6 +31,7 @@
31#define I2O_MINOR 166 31#define I2O_MINOR 166
32#define MICROCODE_MINOR 184 32#define MICROCODE_MINOR 184
33#define TUN_MINOR 200 33#define TUN_MINOR 200
34#define CUSE_MINOR 203
34#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ 35#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
35#define MPT_MINOR 220 36#define MPT_MINOR 220
36#define MPT2SAS_MINOR 221 37#define MPT2SAS_MINOR 221
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index cd1fdf75103b..8df61bc5da00 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -154,10 +154,6 @@ enum {
154 MLX4_CMD_QUERY_IF_STAT = 0X54, 154 MLX4_CMD_QUERY_IF_STAT = 0X54,
155 MLX4_CMD_SET_IF_STAT = 0X55, 155 MLX4_CMD_SET_IF_STAT = 0X55,
156 156
157 /* set port opcode modifiers */
158 MLX4_SET_PORT_PRIO2TC = 0x8,
159 MLX4_SET_PORT_SCHEDULER = 0x9,
160
161 /* register/delete flow steering network rules */ 157 /* register/delete flow steering network rules */
162 MLX4_QP_FLOW_STEERING_ATTACH = 0x65, 158 MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
163 MLX4_QP_FLOW_STEERING_DETACH = 0x66, 159 MLX4_QP_FLOW_STEERING_DETACH = 0x66,
@@ -182,6 +178,8 @@ enum {
182 MLX4_SET_PORT_VLAN_TABLE = 0x3, 178 MLX4_SET_PORT_VLAN_TABLE = 0x3,
183 MLX4_SET_PORT_PRIO_MAP = 0x4, 179 MLX4_SET_PORT_PRIO_MAP = 0x4,
184 MLX4_SET_PORT_GID_TABLE = 0x5, 180 MLX4_SET_PORT_GID_TABLE = 0x5,
181 MLX4_SET_PORT_PRIO2TC = 0x8,
182 MLX4_SET_PORT_SCHEDULER = 0x9,
185}; 183};
186 184
187enum { 185enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 24ce6bdd540e..7d3a523160ba 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -54,6 +54,7 @@ enum {
54 MLX4_FLAG_MASTER = 1 << 2, 54 MLX4_FLAG_MASTER = 1 << 2,
55 MLX4_FLAG_SLAVE = 1 << 3, 55 MLX4_FLAG_SLAVE = 1 << 3,
56 MLX4_FLAG_SRIOV = 1 << 4, 56 MLX4_FLAG_SRIOV = 1 << 4,
57 MLX4_FLAG_OLD_REG_MAC = 1 << 6,
57}; 58};
58 59
59enum { 60enum {
@@ -155,7 +156,7 @@ enum {
155 MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1, 156 MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
156 MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2, 157 MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
157 MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, 158 MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3,
158 MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, 159 MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN = 1LL << 4,
159 MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, 160 MLX4_DEV_CAP_FLAG2_TS = 1LL << 5,
160 MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6, 161 MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6,
161 MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7, 162 MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7,
@@ -640,16 +641,28 @@ struct mlx4_counter {
640 __be64 tx_bytes; 641 __be64 tx_bytes;
641}; 642};
642 643
644struct mlx4_quotas {
645 int qp;
646 int cq;
647 int srq;
648 int mpt;
649 int mtt;
650 int counter;
651 int xrcd;
652};
653
643struct mlx4_dev { 654struct mlx4_dev {
644 struct pci_dev *pdev; 655 struct pci_dev *pdev;
645 unsigned long flags; 656 unsigned long flags;
646 unsigned long num_slaves; 657 unsigned long num_slaves;
647 struct mlx4_caps caps; 658 struct mlx4_caps caps;
648 struct mlx4_phys_caps phys_caps; 659 struct mlx4_phys_caps phys_caps;
660 struct mlx4_quotas quotas;
649 struct radix_tree_root qp_table_tree; 661 struct radix_tree_root qp_table_tree;
650 u8 rev_id; 662 u8 rev_id;
651 char board_id[MLX4_BOARD_ID_LEN]; 663 char board_id[MLX4_BOARD_ID_LEN];
652 int num_vfs; 664 int num_vfs;
665 int numa_node;
653 int oper_log_mgm_entry_size; 666 int oper_log_mgm_entry_size;
654 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 667 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
655 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 668 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
@@ -771,6 +784,12 @@ static inline int mlx4_is_master(struct mlx4_dev *dev)
771 return dev->flags & MLX4_FLAG_MASTER; 784 return dev->flags & MLX4_FLAG_MASTER;
772} 785}
773 786
787static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
788{
789 return dev->phys_caps.base_sqpn + 8 +
790 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
791}
792
774static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) 793static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
775{ 794{
776 return (qpn < dev->phys_caps.base_sqpn + 8 + 795 return (qpn < dev->phys_caps.base_sqpn + 8 +
@@ -816,7 +835,7 @@ void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
816 835
817int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); 836int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
818void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); 837void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
819int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf); 838int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
820void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf); 839void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
821 840
822int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, 841int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -1078,7 +1097,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
1078 u8 *pg, u16 *ratelimit); 1097 u8 *pg, u16 *ratelimit);
1079int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 1098int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1080int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1099int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1081void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); 1100void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1082 1101
1083int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, 1102int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
1084 int npages, u64 iova, u32 *lkey, u32 *rkey); 1103 int npages, u64 iova, u32 *lkey, u32 *rkey);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 5eb4e31af22b..da78875807fc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -230,6 +230,15 @@ enum {
230 MLX5_MAX_PAGE_SHIFT = 31 230 MLX5_MAX_PAGE_SHIFT = 31
231}; 231};
232 232
233enum {
234 MLX5_ADAPTER_PAGE_SHIFT = 12
235};
236
237enum {
238 MLX5_CAP_OFF_DCT = 41,
239 MLX5_CAP_OFF_CMDIF_CSUM = 46,
240};
241
233struct mlx5_inbox_hdr { 242struct mlx5_inbox_hdr {
234 __be16 opcode; 243 __be16 opcode;
235 u8 rsvd[4]; 244 u8 rsvd[4];
@@ -319,9 +328,9 @@ struct mlx5_hca_cap {
319 u8 rsvd25[42]; 328 u8 rsvd25[42];
320 __be16 log_uar_page_sz; 329 __be16 log_uar_page_sz;
321 u8 rsvd26[28]; 330 u8 rsvd26[28];
322 u8 log_msx_atomic_size_qp; 331 u8 log_max_atomic_size_qp;
323 u8 rsvd27[2]; 332 u8 rsvd27[2];
324 u8 log_msx_atomic_size_dc; 333 u8 log_max_atomic_size_dc;
325 u8 rsvd28[76]; 334 u8 rsvd28[76];
326}; 335};
327 336
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6b8c496572c8..554548cd3dd4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -483,6 +483,7 @@ struct mlx5_priv {
483 struct rb_root page_root; 483 struct rb_root page_root;
484 int fw_pages; 484 int fw_pages;
485 int reg_pages; 485 int reg_pages;
486 struct list_head free_list;
486 487
487 struct mlx5_core_health health; 488 struct mlx5_core_health health;
488 489
@@ -557,9 +558,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
557struct mlx5_cmd_work_ent { 558struct mlx5_cmd_work_ent {
558 struct mlx5_cmd_msg *in; 559 struct mlx5_cmd_msg *in;
559 struct mlx5_cmd_msg *out; 560 struct mlx5_cmd_msg *out;
561 void *uout;
562 int uout_size;
560 mlx5_cmd_cbk_t callback; 563 mlx5_cmd_cbk_t callback;
561 void *context; 564 void *context;
562 int idx; 565 int idx;
563 struct completion done; 566 struct completion done;
564 struct mlx5_cmd *cmd; 567 struct mlx5_cmd *cmd;
565 struct work_struct work; 568 struct work_struct work;
@@ -570,6 +573,7 @@ struct mlx5_cmd_work_ent {
570 u8 token; 573 u8 token;
571 struct timespec ts1; 574 struct timespec ts1;
572 struct timespec ts2; 575 struct timespec ts2;
576 u16 op;
573}; 577};
574 578
575struct mlx5_pas { 579struct mlx5_pas {
@@ -653,6 +657,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
653int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 657int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
654int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 658int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
655 int out_size); 659 int out_size);
660int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
661 void *out, int out_size, mlx5_cmd_cbk_t callback,
662 void *context);
656int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); 663int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
657int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); 664int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
658int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 665int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -676,7 +683,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
676int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 683int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
677 u16 lwm, int is_srq); 684 u16 lwm, int is_srq);
678int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 685int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
679 struct mlx5_create_mkey_mbox_in *in, int inlen); 686 struct mlx5_create_mkey_mbox_in *in, int inlen,
687 mlx5_cmd_cbk_t callback, void *context,
688 struct mlx5_create_mkey_mbox_out *out);
680int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); 689int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
681int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 690int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
682 struct mlx5_query_mkey_mbox_out *out, int outlen); 691 struct mlx5_query_mkey_mbox_out *out, int outlen);
@@ -745,6 +754,11 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
745 return mkey_idx << 8; 754 return mkey_idx << 8;
746} 755}
747 756
757static inline u8 mlx5_mkey_variant(u32 mkey)
758{
759 return mkey & 0xff;
760}
761
748enum { 762enum {
749 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, 763 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
750 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, 764 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
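
The new mlx5_mkey_variant() is the counterpart of mlx5_idx_to_mkey() in the hunk above: a memory key carries the table index in the bits above the low byte and an 8-bit variant in that low byte. A short standalone sketch of the packing (local helper names, not the driver API):

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t idx_to_mkey(uint32_t mkey_idx) { return mkey_idx << 8; }   /* like mlx5_idx_to_mkey() */
static uint32_t mkey_to_idx(uint32_t mkey)     { return mkey >> 8; }
static uint8_t  mkey_variant(uint32_t mkey)    { return mkey & 0xff; }     /* like mlx5_mkey_variant() */

int main(void)
{
    uint32_t mkey = idx_to_mkey(0x1234) | 0xab;      /* index 0x1234, variant 0xab */

    printf("index   = 0x%x\n", mkey_to_idx(mkey));   /* 0x1234 */
    printf("variant = 0x%x\n", mkey_variant(mkey));  /* 0xab   */
    return 0;
}
```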
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b6e55ee8855..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -50,6 +50,10 @@ extern int sysctl_legacy_va_layout;
50#include <asm/pgtable.h> 50#include <asm/pgtable.h>
51#include <asm/processor.h> 51#include <asm/processor.h>
52 52
53#ifndef __pa_symbol
54#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
55#endif
56
53extern unsigned long sysctl_user_reserve_kbytes; 57extern unsigned long sysctl_user_reserve_kbytes;
54extern unsigned long sysctl_admin_reserve_kbytes; 58extern unsigned long sysctl_admin_reserve_kbytes;
55 59
@@ -297,12 +301,26 @@ static inline int put_page_testzero(struct page *page)
297/* 301/*
298 * Try to grab a ref unless the page has a refcount of zero, return false if 302 * Try to grab a ref unless the page has a refcount of zero, return false if
299 * that is the case. 303 * that is the case.
304 * This can be called when MMU is off so it must not access
305 * any of the virtual mappings.
300 */ 306 */
301static inline int get_page_unless_zero(struct page *page) 307static inline int get_page_unless_zero(struct page *page)
302{ 308{
303 return atomic_inc_not_zero(&page->_count); 309 return atomic_inc_not_zero(&page->_count);
304} 310}
305 311
312/*
313 * Try to drop a ref unless the page has a refcount of one, return false if
314 * that is the case.
315 * This is to make sure that the refcount won't become zero after this drop.
316 * This can be called when MMU is off so it must not access
317 * any of the virtual mappings.
318 */
319static inline int put_page_unless_one(struct page *page)
320{
321 return atomic_add_unless(&page->_count, -1, 1);
322}
323
306extern int page_is_ram(unsigned long pfn); 324extern int page_is_ram(unsigned long pfn);
307 325
308/* Support for virtually mapped pages */ 326/* Support for virtually mapped pages */
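
get_page_unless_zero() and the new put_page_unless_one() are both conditional refcount updates: the first takes a reference only while the count is still non-zero, the second drops one only if doing so cannot reach zero, so this path never frees the page. A userspace analogy with C11 atomics (not the kernel atomic_t API) makes the two conditions concrete:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *cnt unless it is 0; mirrors atomic_inc_not_zero(). */
static bool inc_not_zero(atomic_int *cnt)
{
    int old = atomic_load(cnt);
    while (old != 0)
        if (atomic_compare_exchange_weak(cnt, &old, old + 1))
            return true;
    return false;
}

/* Add -1 to *cnt unless it equals 1; mirrors atomic_add_unless(cnt, -1, 1),
 * i.e. never drop the last reference from this path. */
static bool dec_unless_one(atomic_int *cnt)
{
    int old = atomic_load(cnt);
    while (old != 1)
        if (atomic_compare_exchange_weak(cnt, &old, old - 1))
            return true;
    return false;
}

int main(void)
{
    atomic_int refs = 1;

    printf("get: %d refs=%d\n", inc_not_zero(&refs),  atomic_load(&refs)); /* get: 1 refs=2 */
    printf("put: %d refs=%d\n", dec_unless_one(&refs), atomic_load(&refs)); /* put: 1 refs=1 */
    printf("put: %d refs=%d\n", dec_unless_one(&refs), atomic_load(&refs)); /* put: 0 refs=1 */
    return 0;
}
```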
@@ -581,11 +599,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
581 * sets it, so none of the operations on it need to be atomic. 599 * sets it, so none of the operations on it need to be atomic.
582 */ 600 */
583 601
584/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS | */ 602/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
585#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) 603#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
586#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) 604#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
587#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) 605#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
588#define LAST_NID_PGOFF (ZONES_PGOFF - LAST_NID_WIDTH) 606#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
589 607
590/* 608/*
591 * Define the bit shifts to access each section. For non-existent 609 * Define the bit shifts to access each section. For non-existent
@@ -595,7 +613,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
595#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) 613#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
596#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) 614#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
597#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) 615#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
598#define LAST_NID_PGSHIFT (LAST_NID_PGOFF * (LAST_NID_WIDTH != 0)) 616#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
599 617
600/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ 618/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
601#ifdef NODE_NOT_IN_PAGE_FLAGS 619#ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -617,7 +635,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
617#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) 635#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
618#define NODES_MASK ((1UL << NODES_WIDTH) - 1) 636#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
619#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) 637#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
620#define LAST_NID_MASK ((1UL << LAST_NID_WIDTH) - 1) 638#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_WIDTH) - 1)
621#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) 639#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
622 640
623static inline enum zone_type page_zonenum(const struct page *page) 641static inline enum zone_type page_zonenum(const struct page *page)
@@ -661,51 +679,117 @@ static inline int page_to_nid(const struct page *page)
661#endif 679#endif
662 680
663#ifdef CONFIG_NUMA_BALANCING 681#ifdef CONFIG_NUMA_BALANCING
664#ifdef LAST_NID_NOT_IN_PAGE_FLAGS 682static inline int cpu_pid_to_cpupid(int cpu, int pid)
665static inline int page_nid_xchg_last(struct page *page, int nid)
666{ 683{
667 return xchg(&page->_last_nid, nid); 684 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
668} 685}
669 686
670static inline int page_nid_last(struct page *page) 687static inline int cpupid_to_pid(int cpupid)
671{ 688{
672 return page->_last_nid; 689 return cpupid & LAST__PID_MASK;
673} 690}
674static inline void page_nid_reset_last(struct page *page) 691
692static inline int cpupid_to_cpu(int cpupid)
675{ 693{
676 page->_last_nid = -1; 694 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
677} 695}
678#else 696
679static inline int page_nid_last(struct page *page) 697static inline int cpupid_to_nid(int cpupid)
698{
699 return cpu_to_node(cpupid_to_cpu(cpupid));
700}
701
702static inline bool cpupid_pid_unset(int cpupid)
680{ 703{
681 return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK; 704 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
682} 705}
683 706
684extern int page_nid_xchg_last(struct page *page, int nid); 707static inline bool cpupid_cpu_unset(int cpupid)
708{
709 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
710}
685 711
686static inline void page_nid_reset_last(struct page *page) 712static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
687{ 713{
688 int nid = (1 << LAST_NID_SHIFT) - 1; 714 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
715}
689 716
690 page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT); 717#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
691 page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT; 718#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
719static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
720{
721 return xchg(&page->_last_cpupid, cpupid);
722}
723
724static inline int page_cpupid_last(struct page *page)
725{
726 return page->_last_cpupid;
727}
728static inline void page_cpupid_reset_last(struct page *page)
729{
730 page->_last_cpupid = -1;
692} 731}
693#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
694#else 732#else
695static inline int page_nid_xchg_last(struct page *page, int nid) 733static inline int page_cpupid_last(struct page *page)
734{
735 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
736}
737
738extern int page_cpupid_xchg_last(struct page *page, int cpupid);
739
740static inline void page_cpupid_reset_last(struct page *page)
741{
742 int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
743
744 page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
745 page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
746}
747#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
748#else /* !CONFIG_NUMA_BALANCING */
749static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
696{ 750{
697 return page_to_nid(page); 751 return page_to_nid(page); /* XXX */
698} 752}
699 753
700static inline int page_nid_last(struct page *page) 754static inline int page_cpupid_last(struct page *page)
701{ 755{
702 return page_to_nid(page); 756 return page_to_nid(page); /* XXX */
703} 757}
704 758
705static inline void page_nid_reset_last(struct page *page) 759static inline int cpupid_to_nid(int cpupid)
760{
761 return -1;
762}
763
764static inline int cpupid_to_pid(int cpupid)
765{
766 return -1;
767}
768
769static inline int cpupid_to_cpu(int cpupid)
770{
771 return -1;
772}
773
774static inline int cpu_pid_to_cpupid(int nid, int pid)
775{
776 return -1;
777}
778
779static inline bool cpupid_pid_unset(int cpupid)
780{
781 return 1;
782}
783
784static inline void page_cpupid_reset_last(struct page *page)
706{ 785{
707} 786}
708#endif 787
788static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
789{
790 return false;
791}
792#endif /* CONFIG_NUMA_BALANCING */
709 793
710static inline struct zone *page_zone(const struct page *page) 794static inline struct zone *page_zone(const struct page *page)
711{ 795{
@@ -1232,32 +1316,76 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
1232} 1316}
1233#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ 1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1234 1318
1235#if USE_SPLIT_PTLOCKS 1319#if USE_SPLIT_PTE_PTLOCKS
1236/* 1320#if ALLOC_SPLIT_PTLOCKS
1237 * We tuck a spinlock to guard each pagetable page into its struct page, 1321extern bool ptlock_alloc(struct page *page);
1238 * at page->private, with BUILD_BUG_ON to make sure that this will not 1322extern void ptlock_free(struct page *page);
1239 * overflow into the next struct page (as it might with DEBUG_SPINLOCK). 1323
1240 * When freeing, reset page->mapping so free_pages_check won't complain. 1324static inline spinlock_t *ptlock_ptr(struct page *page)
1241 */ 1325{
1242#define __pte_lockptr(page) &((page)->ptl) 1326 return page->ptl;
1243#define pte_lock_init(_page) do { \ 1327}
1244 spin_lock_init(__pte_lockptr(_page)); \ 1328#else /* ALLOC_SPLIT_PTLOCKS */
1245} while (0) 1329static inline bool ptlock_alloc(struct page *page)
1246#define pte_lock_deinit(page) ((page)->mapping = NULL) 1330{
1247#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) 1331 return true;
1248#else /* !USE_SPLIT_PTLOCKS */ 1332}
1333
1334static inline void ptlock_free(struct page *page)
1335{
1336}
1337
1338static inline spinlock_t *ptlock_ptr(struct page *page)
1339{
1340 return &page->ptl;
1341}
1342#endif /* ALLOC_SPLIT_PTLOCKS */
1343
1344static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1345{
1346 return ptlock_ptr(pmd_page(*pmd));
1347}
1348
1349static inline bool ptlock_init(struct page *page)
1350{
1351 /*
1352 * prep_new_page() initialize page->private (and therefore page->ptl)
1353 * with 0. Make sure nobody took it in use in between.
1354 *
1355 * It can happen if arch try to use slab for page table allocation:
1356 * slab code uses page->slab_cache and page->first_page (for tail
1357 * pages), which share storage with page->ptl.
1358 */
1359 VM_BUG_ON(*(unsigned long *)&page->ptl);
1360 if (!ptlock_alloc(page))
1361 return false;
1362 spin_lock_init(ptlock_ptr(page));
1363 return true;
1364}
1365
1366/* Reset page->mapping so free_pages_check won't complain. */
1367static inline void pte_lock_deinit(struct page *page)
1368{
1369 page->mapping = NULL;
1370 ptlock_free(page);
1371}
1372
1373#else /* !USE_SPLIT_PTE_PTLOCKS */
1249/* 1374/*
1250 * We use mm->page_table_lock to guard all pagetable pages of the mm. 1375 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1251 */ 1376 */
1252#define pte_lock_init(page) do {} while (0) 1377static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1253#define pte_lock_deinit(page) do {} while (0) 1378{
1254#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) 1379 return &mm->page_table_lock;
1255#endif /* USE_SPLIT_PTLOCKS */ 1380}
1381static inline bool ptlock_init(struct page *page) { return true; }
1382static inline void pte_lock_deinit(struct page *page) {}
1383#endif /* USE_SPLIT_PTE_PTLOCKS */
1256 1384
1257static inline void pgtable_page_ctor(struct page *page) 1385static inline bool pgtable_page_ctor(struct page *page)
1258{ 1386{
1259 pte_lock_init(page);
1260 inc_zone_page_state(page, NR_PAGETABLE); 1387 inc_zone_page_state(page, NR_PAGETABLE);
1388 return ptlock_init(page);
1261} 1389}
1262 1390
1263static inline void pgtable_page_dtor(struct page *page) 1391static inline void pgtable_page_dtor(struct page *page)
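
With USE_SPLIT_PTE_PTLOCKS each page-table page now carries its own lock (embedded in struct page, or separately allocated and pointed to when ALLOC_SPLIT_PTLOCKS); otherwise everything falls back to mm->page_table_lock. The shape of the API — ask for the right lock for a given table, then lock and unlock it — can be sketched with pthreads as a pure analogy, not kernel code:

```c
#include <pthread.h>
#include <stdio.h>

#define SPLIT_LOCKS 1                    /* compile-time choice, like USE_SPLIT_PTE_PTLOCKS */

struct mm  { pthread_mutex_t page_table_lock; };
struct ptp { pthread_mutex_t ptl; };     /* stands in for one page-table page */

static pthread_mutex_t *pte_lockptr(struct mm *mm, struct ptp *page)
{
#if SPLIT_LOCKS
    (void)mm;
    return &page->ptl;                   /* fine-grained: one lock per table page */
#else
    (void)page;
    return &mm->page_table_lock;         /* coarse: one lock for the whole mm */
#endif
}

int main(void)
{
    struct mm  mm  = { PTHREAD_MUTEX_INITIALIZER };
    struct ptp ptp = { PTHREAD_MUTEX_INITIALIZER };

    pthread_mutex_t *ptl = pte_lockptr(&mm, &ptp);
    pthread_mutex_lock(ptl);
    /* ... modify PTEs in this table ... */
    pthread_mutex_unlock(ptl);
    printf("locked via %s lock\n", SPLIT_LOCKS ? "per-page" : "per-mm");
    return 0;
}
```

The pmd_lock() helper added a few hunks further down follows the same lock-and-return idiom at the PMD level.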
@@ -1294,6 +1422,52 @@ static inline void pgtable_page_dtor(struct page *page)
1294 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ 1422 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1295 NULL: pte_offset_kernel(pmd, address)) 1423 NULL: pte_offset_kernel(pmd, address))
1296 1424
1425#if USE_SPLIT_PMD_PTLOCKS
1426
1427static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1428{
1429 return ptlock_ptr(virt_to_page(pmd));
1430}
1431
1432static inline bool pgtable_pmd_page_ctor(struct page *page)
1433{
1434#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1435 page->pmd_huge_pte = NULL;
1436#endif
1437 return ptlock_init(page);
1438}
1439
1440static inline void pgtable_pmd_page_dtor(struct page *page)
1441{
1442#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1443 VM_BUG_ON(page->pmd_huge_pte);
1444#endif
1445 ptlock_free(page);
1446}
1447
1448#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
1449
1450#else
1451
1452static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1453{
1454 return &mm->page_table_lock;
1455}
1456
1457static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1458static inline void pgtable_pmd_page_dtor(struct page *page) {}
1459
1460#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1461
1462#endif
1463
1464static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1465{
1466 spinlock_t *ptl = pmd_lockptr(mm, pmd);
1467 spin_lock(ptl);
1468 return ptl;
1469}
1470
1297extern void free_area_init(unsigned long * zones_size); 1471extern void free_area_init(unsigned long * zones_size);
1298extern void free_area_init_node(int nid, unsigned long * zones_size, 1472extern void free_area_init_node(int nid, unsigned long * zones_size,
1299 unsigned long zone_start_pfn, unsigned long *zholes_size); 1473 unsigned long zone_start_pfn, unsigned long *zholes_size);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d9851eeb6e1d..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -23,7 +23,10 @@
23 23
24struct address_space; 24struct address_space;
25 25
26#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
29#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
27 30
28/* 31/*
29 * Each physical page in the system has a struct page associated with 32 * Each physical page in the system has a struct page associated with
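
ALLOC_SPLIT_PTLOCKS is just a size check: if spinlock_t (for example with lock debugging enabled) no longer fits in the one word struct page can spare for it, the lock is allocated separately and struct page stores only a pointer, which is what the ptl union further down in this file reflects. The same decision with ordinary C types, where the struct sizes stand in for SPINLOCK_SIZE:

```c
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

struct slim_lock { unsigned int val; };                   /* a plain word-sized lock        */
struct fat_lock  { unsigned long owner, magic, state; };  /* e.g. a debug-instrumented lock */

/* BITS_PER_LONG/8 == sizeof(long): the one word available inside struct page. */
#define ALLOC_SPLIT(locktype) (sizeof(locktype) > BITS_PER_LONG / 8)

int main(void)
{
    printf("slim lock allocated separately? %d\n", (int)ALLOC_SPLIT(struct slim_lock)); /* 0 */
    printf("fat lock allocated separately?  %d\n", (int)ALLOC_SPLIT(struct fat_lock));  /* 1 */
    return 0;
}
```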
@@ -42,18 +45,22 @@ struct page {
42 /* First double word block */ 45 /* First double word block */
43 unsigned long flags; /* Atomic flags, some possibly 46 unsigned long flags; /* Atomic flags, some possibly
44 * updated asynchronously */ 47 * updated asynchronously */
45 struct address_space *mapping; /* If low bit clear, points to 48 union {
46 * inode address_space, or NULL. 49 struct address_space *mapping; /* If low bit clear, points to
47 * If page mapped as anonymous 50 * inode address_space, or NULL.
48 * memory, low bit is set, and 51 * If page mapped as anonymous
49 * it points to anon_vma object: 52 * memory, low bit is set, and
50 * see PAGE_MAPPING_ANON below. 53 * it points to anon_vma object:
51 */ 54 * see PAGE_MAPPING_ANON below.
55 */
56 void *s_mem; /* slab first object */
57 };
58
52 /* Second double word */ 59 /* Second double word */
53 struct { 60 struct {
54 union { 61 union {
55 pgoff_t index; /* Our offset within mapping. */ 62 pgoff_t index; /* Our offset within mapping. */
56 void *freelist; /* slub/slob first free object */ 63 void *freelist; /* sl[aou]b first free object */
57 bool pfmemalloc; /* If set by the page allocator, 64 bool pfmemalloc; /* If set by the page allocator,
58 * ALLOC_NO_WATERMARKS was set 65 * ALLOC_NO_WATERMARKS was set
59 * and the low watermark was not 66 * and the low watermark was not
@@ -109,6 +116,7 @@ struct page {
109 }; 116 };
110 atomic_t _count; /* Usage count, see below. */ 117 atomic_t _count; /* Usage count, see below. */
111 }; 118 };
119 unsigned int active; /* SLAB */
112 }; 120 };
113 }; 121 };
114 122
@@ -130,6 +138,12 @@ struct page {
130 138
131 struct list_head list; /* slobs list of pages */ 139 struct list_head list; /* slobs list of pages */
132 struct slab *slab_page; /* slab fields */ 140 struct slab *slab_page; /* slab fields */
141 struct rcu_head rcu_head; /* Used by SLAB
142 * when destroying via RCU
143 */
144#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
145 pgtable_t pmd_huge_pte; /* protected by page->ptl */
146#endif
133 }; 147 };
134 148
135 /* Remainder is not double word aligned */ 149 /* Remainder is not double word aligned */
@@ -141,9 +155,13 @@ struct page {
141 * indicates order in the buddy 155 * indicates order in the buddy
142 * system if PG_buddy is set. 156 * system if PG_buddy is set.
143 */ 157 */
144#if USE_SPLIT_PTLOCKS 158#if USE_SPLIT_PTE_PTLOCKS
159#if ALLOC_SPLIT_PTLOCKS
160 spinlock_t *ptl;
161#else
145 spinlock_t ptl; 162 spinlock_t ptl;
146#endif 163#endif
164#endif
147 struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ 165 struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
148 struct page *first_page; /* Compound tail pages */ 166 struct page *first_page; /* Compound tail pages */
149 }; 167 };
@@ -174,8 +192,8 @@ struct page {
174 void *shadow; 192 void *shadow;
175#endif 193#endif
176 194
177#ifdef LAST_NID_NOT_IN_PAGE_FLAGS 195#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
178 int _last_nid; 196 int _last_cpupid;
179#endif 197#endif
180} 198}
181/* 199/*
@@ -309,14 +327,14 @@ enum {
309 NR_MM_COUNTERS 327 NR_MM_COUNTERS
310}; 328};
311 329
312#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU) 330#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
313#define SPLIT_RSS_COUNTING 331#define SPLIT_RSS_COUNTING
314/* per-thread cached information, */ 332/* per-thread cached information, */
315struct task_rss_stat { 333struct task_rss_stat {
316 int events; /* for synchronization threshold */ 334 int events; /* for synchronization threshold */
317 int count[NR_MM_COUNTERS]; 335 int count[NR_MM_COUNTERS];
318}; 336};
319#endif /* USE_SPLIT_PTLOCKS */ 337#endif /* USE_SPLIT_PTE_PTLOCKS */
320 338
321struct mm_rss_stat { 339struct mm_rss_stat {
322 atomic_long_t count[NR_MM_COUNTERS]; 340 atomic_long_t count[NR_MM_COUNTERS];
@@ -339,6 +357,7 @@ struct mm_struct {
339 pgd_t * pgd; 357 pgd_t * pgd;
340 atomic_t mm_users; /* How many users with user space? */ 358 atomic_t mm_users; /* How many users with user space? */
341 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ 359 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
360 atomic_long_t nr_ptes; /* Page table pages */
342 int map_count; /* number of VMAs */ 361 int map_count; /* number of VMAs */
343 362
344 spinlock_t page_table_lock; /* Protects page tables and some counters */ 363 spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -360,7 +379,6 @@ struct mm_struct {
360 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ 379 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
361 unsigned long stack_vm; /* VM_GROWSUP/DOWN */ 380 unsigned long stack_vm; /* VM_GROWSUP/DOWN */
362 unsigned long def_flags; 381 unsigned long def_flags;
363 unsigned long nr_ptes; /* Page table pages */
364 unsigned long start_code, end_code, start_data, end_data; 382 unsigned long start_code, end_code, start_data, end_data;
365 unsigned long start_brk, brk, start_stack; 383 unsigned long start_brk, brk, start_stack;
366 unsigned long arg_start, arg_end, env_start, env_end; 384 unsigned long arg_start, arg_end, env_start, env_end;
@@ -406,7 +424,7 @@ struct mm_struct {
406#ifdef CONFIG_MMU_NOTIFIER 424#ifdef CONFIG_MMU_NOTIFIER
407 struct mmu_notifier_mm *mmu_notifier_mm; 425 struct mmu_notifier_mm *mmu_notifier_mm;
408#endif 426#endif
409#ifdef CONFIG_TRANSPARENT_HUGEPAGE 427#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
410 pgtable_t pmd_huge_pte; /* protected by page_table_lock */ 428 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
411#endif 429#endif
412#ifdef CONFIG_CPUMASK_OFFSTACK 430#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -420,28 +438,23 @@ struct mm_struct {
420 */ 438 */
421 unsigned long numa_next_scan; 439 unsigned long numa_next_scan;
422 440
423 /* numa_next_reset is when the PTE scanner period will be reset */
424 unsigned long numa_next_reset;
425
426 /* Restart point for scanning and setting pte_numa */ 441 /* Restart point for scanning and setting pte_numa */
427 unsigned long numa_scan_offset; 442 unsigned long numa_scan_offset;
428 443
429 /* numa_scan_seq prevents two threads setting pte_numa */ 444 /* numa_scan_seq prevents two threads setting pte_numa */
430 int numa_scan_seq; 445 int numa_scan_seq;
431 446#endif
447#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
432 /* 448 /*
433 * The first node a task was scheduled on. If a task runs on 449 * An operation with batched TLB flushing is going on. Anything that
434 * a different node than Make PTE Scan Go Now. 450 * can move process memory needs to flush the TLB when moving a
451 * PROT_NONE or PROT_NUMA mapped page.
435 */ 452 */
436 int first_nid; 453 bool tlb_flush_pending;
437#endif 454#endif
438 struct uprobes_state uprobes_state; 455 struct uprobes_state uprobes_state;
439}; 456};
440 457
441/* first nid will either be a valid NID or one of these values */
442#define NUMA_PTE_SCAN_INIT -1
443#define NUMA_PTE_SCAN_ACTIVE -2
444
445static inline void mm_init_cpumask(struct mm_struct *mm) 458static inline void mm_init_cpumask(struct mm_struct *mm)
446{ 459{
447#ifdef CONFIG_CPUMASK_OFFSTACK 460#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -455,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
455 return mm->cpu_vm_mask_var; 468 return mm->cpu_vm_mask_var;
456} 469}
457 470
471#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
472/*
473 * Memory barriers to keep this state in sync are graciously provided by
474 * the page table locks, outside of which no page table modifications happen.
475 * The barriers below prevent the compiler from re-ordering the instructions
476 * around the memory barriers that are already present in the code.
477 */
478static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
479{
480 barrier();
481 return mm->tlb_flush_pending;
482}
483static inline void set_tlb_flush_pending(struct mm_struct *mm)
484{
485 mm->tlb_flush_pending = true;
486
487 /*
488 * Guarantee that the tlb_flush_pending store does not leak into the
489 * critical section updating the page tables
490 */
491 smp_mb__before_spinlock();
492}
493/* Clearing is done after a TLB flush, which also provides a barrier. */
494static inline void clear_tlb_flush_pending(struct mm_struct *mm)
495{
496 barrier();
497 mm->tlb_flush_pending = false;
498}
499#else
500static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
501{
502 return false;
503}
504static inline void set_tlb_flush_pending(struct mm_struct *mm)
505{
506}
507static inline void clear_tlb_flush_pending(struct mm_struct *mm)
508{
509}
510#endif
511
458#endif /* _LINUX_MM_TYPES_H */ 512#endif /* _LINUX_MM_TYPES_H */
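
The tlb_flush_pending flag brackets a batched page-table update: it is set before the PTEs are rewritten and cleared only after the deferred TLB flush, so code such as page migration can tell that other CPUs may still be using the old mappings. The toy sequence below (one mutex standing in for the page-table locks, ignoring the memory-ordering details the comments above describe) only shows where the set and clear calls sit:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy mm: the real kernel relies on the page-table locks plus the barriers
 * described above, not on one big mutex. */
struct toy_mm {
    pthread_mutex_t page_table_lock;
    bool tlb_flush_pending;
};

static void change_protection(struct toy_mm *mm)
{
    mm->tlb_flush_pending = true;             /* set_tlb_flush_pending() */

    pthread_mutex_lock(&mm->page_table_lock);
    puts("rewriting PTEs (e.g. to PROT_NONE)"); /* the batched page-table update */
    pthread_mutex_unlock(&mm->page_table_lock);

    puts("flushing TLB");                     /* batched flush after the loop */
    mm->tlb_flush_pending = false;            /* clear_tlb_flush_pending() */
}

static void try_migrate(struct toy_mm *mm)
{
    pthread_mutex_lock(&mm->page_table_lock);
    if (mm->tlb_flush_pending)                /* mm_tlb_flush_pending() */
        puts("migration: flush pending, other CPUs may still see the old PTE");
    pthread_mutex_unlock(&mm->page_table_lock);
}

int main(void)
{
    struct toy_mm mm = { PTHREAD_MUTEX_INITIALIZER, false };
    change_protection(&mm);
    try_migrate(&mm);
    return 0;
}
```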
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 92dc257251e4..7f7f8dae4b1d 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -87,4 +87,6 @@ calc_vm_flag_bits(unsigned long flags)
87 _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | 87 _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
88 _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ); 88 _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
89} 89}
90
91unsigned long vm_commit_limit(void);
90#endif /* _LINUX_MMAN_H */ 92#endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 842de3e21e70..176fdf824b14 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -240,6 +240,7 @@ struct mmc_part {
240struct mmc_card { 240struct mmc_card {
241 struct mmc_host *host; /* the host this device belongs to */ 241 struct mmc_host *host; /* the host this device belongs to */
242 struct device dev; /* the device */ 242 struct device dev; /* the device */
243 u32 ocr; /* the current OCR setting */
243 unsigned int rca; /* relative card address of device */ 244 unsigned int rca; /* relative card address of device */
244 unsigned int type; /* card type */ 245 unsigned int type; /* card type */
245#define MMC_TYPE_MMC 0 /* MMC card */ 246#define MMC_TYPE_MMC 0 /* MMC card */
@@ -257,6 +258,7 @@ struct mmc_card {
257#define MMC_CARD_REMOVED (1<<7) /* card has been removed */ 258#define MMC_CARD_REMOVED (1<<7) /* card has been removed */
258#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ 259#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */
259#define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */ 260#define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */
261#define MMC_STATE_SUSPENDED (1<<11) /* card is suspended */
260 unsigned int quirks; /* card quirks */ 262 unsigned int quirks; /* card quirks */
261#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ 263#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
262#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ 264#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -420,10 +422,10 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
420#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) 422#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
421#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) 423#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
422#define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 424#define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
423#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
424#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) 425#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
425#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) 426#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
426#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) 427#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
428#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
427 429
428#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 430#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
429#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 431#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -432,11 +434,12 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
432#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) 434#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
433#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) 435#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
434#define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) 436#define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
435#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
436#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) 437#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
437#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) 438#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
438#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS) 439#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
439#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS) 440#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
441#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
442#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
440 443
441/* 444/*
442 * Quirk add/remove for MMC products. 445 * Quirk add/remove for MMC products.
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index da51bec578c3..87079fc38011 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -151,7 +151,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
151extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, 151extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
152 struct mmc_command *, int); 152 struct mmc_command *, int);
153extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); 153extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
154extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool); 154extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
155 bool);
155extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); 156extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
156extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); 157extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
157 158
@@ -188,7 +189,6 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
188 189
189extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); 190extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
190extern void mmc_release_host(struct mmc_host *host); 191extern void mmc_release_host(struct mmc_host *host);
191extern int mmc_try_claim_host(struct mmc_host *host);
192 192
193extern void mmc_get_card(struct mmc_card *card); 193extern void mmc_get_card(struct mmc_card *card);
194extern void mmc_put_card(struct mmc_card *card); 194extern void mmc_put_card(struct mmc_card *card);
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 198f0fa44e9f..6ce7d2cd3c7a 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -15,6 +15,7 @@
15#define LINUX_MMC_DW_MMC_H 15#define LINUX_MMC_DW_MMC_H
16 16
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/mmc/core.h>
18 19
19#define MAX_MCI_SLOTS 2 20#define MAX_MCI_SLOTS 2
20 21
@@ -129,6 +130,9 @@ struct dw_mci {
129 struct mmc_request *mrq; 130 struct mmc_request *mrq;
130 struct mmc_command *cmd; 131 struct mmc_command *cmd;
131 struct mmc_data *data; 132 struct mmc_data *data;
133 struct mmc_command stop_abort;
134 unsigned int prev_blksz;
135 unsigned char timing;
132 struct workqueue_struct *card_workqueue; 136 struct workqueue_struct *card_workqueue;
133 137
134 /* DMA interface members*/ 138 /* DMA interface members*/
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3b0c33ae13e1..99f5709ac343 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -254,6 +254,7 @@ struct mmc_host {
254#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ 254#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
255#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ 255#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
256#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ 256#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
257#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */
257#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ 258#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
258#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ 259#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
259#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ 260#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
@@ -309,7 +310,6 @@ struct mmc_host {
309 spinlock_t lock; /* lock for claim and bus ops */ 310 spinlock_t lock; /* lock for claim and bus ops */
310 311
311 struct mmc_ios ios; /* current io bus settings */ 312 struct mmc_ios ios; /* current io bus settings */
312 u32 ocr; /* the current OCR setting */
313 313
314 /* group bitfields together to minimize padding */ 314 /* group bitfields together to minimize padding */
315 unsigned int use_spi_crc:1; 315 unsigned int use_spi_crc:1;
@@ -382,9 +382,6 @@ static inline void *mmc_priv(struct mmc_host *host)
382#define mmc_classdev(x) (&(x)->class_dev) 382#define mmc_classdev(x) (&(x)->class_dev)
383#define mmc_hostname(x) (dev_name(&(x)->class_dev)) 383#define mmc_hostname(x) (dev_name(&(x)->class_dev))
384 384
385int mmc_suspend_host(struct mmc_host *);
386int mmc_resume_host(struct mmc_host *);
387
388int mmc_power_save_host(struct mmc_host *host); 385int mmc_power_save_host(struct mmc_host *host);
389int mmc_power_restore_host(struct mmc_host *host); 386int mmc_power_restore_host(struct mmc_host *host);
390 387
diff --git a/include/linux/module.h b/include/linux/module.h
index 05f2447f8c15..15cd6b1b211e 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -367,9 +367,6 @@ struct module
367 /* What modules do I depend on? */ 367 /* What modules do I depend on? */
368 struct list_head target_list; 368 struct list_head target_list;
369 369
370 /* Who is waiting for us to be unloaded */
371 struct task_struct *waiter;
372
373 /* Destruction function. */ 370 /* Destruction function. */
374 void (*exit)(void); 371 void (*exit)(void);
375 372
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 38cd98f112a0..371d346fa270 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -49,6 +49,8 @@ struct mnt_namespace;
49 49
50#define MNT_LOCK_READONLY 0x400000 50#define MNT_LOCK_READONLY 0x400000
51#define MNT_LOCKED 0x800000 51#define MNT_LOCKED 0x800000
52#define MNT_DOOMED 0x1000000
53#define MNT_SYNC_UMOUNT 0x2000000
52 54
53struct vfsmount { 55struct vfsmount {
54 struct dentry *mnt_root; /* root of the mounted tree */ 56 struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/msg.h b/include/linux/msg.h
index 391af8d11cce..e21f9d44307f 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -6,9 +6,9 @@
6 6
7/* one msg_msg structure for each message */ 7/* one msg_msg structure for each message */
8struct msg_msg { 8struct msg_msg {
9 struct list_head m_list; 9 struct list_head m_list;
10 long m_type; 10 long m_type;
11 int m_ts; /* message text size */ 11 size_t m_ts; /* message text size */
12 struct msg_msgseg* next; 12 struct msg_msgseg* next;
13 void *security; 13 void *security;
14 /* the actual message follows immediately */ 14 /* the actual message follows immediately */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index b17ead818aec..009b02481436 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -26,11 +26,11 @@ struct msi_desc {
26 struct { 26 struct {
27 __u8 is_msix : 1; 27 __u8 is_msix : 1;
28 __u8 multiple: 3; /* log2 number of messages */ 28 __u8 multiple: 3; /* log2 number of messages */
29 __u8 maskbit : 1; /* mask-pending bit supported ? */ 29 __u8 maskbit : 1; /* mask-pending bit supported ? */
30 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 30 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
31 __u8 pos; /* Location of the msi capability */ 31 __u8 pos; /* Location of the msi capability */
32 __u16 entry_nr; /* specific enabled entry */ 32 __u16 entry_nr; /* specific enabled entry */
33 unsigned default_irq; /* default pre-assigned irq */ 33 unsigned default_irq; /* default pre-assigned irq */
34 } msi_attrib; 34 } msi_attrib;
35 35
36 u32 masked; /* mask bits */ 36 u32 masked; /* mask bits */
@@ -64,6 +64,8 @@ void arch_restore_msi_irqs(struct pci_dev *dev, int irq);
64 64
65void default_teardown_msi_irqs(struct pci_dev *dev); 65void default_teardown_msi_irqs(struct pci_dev *dev);
66void default_restore_msi_irqs(struct pci_dev *dev, int irq); 66void default_restore_msi_irqs(struct pci_dev *dev, int irq);
67u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
68u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
67 69
68struct msi_chip { 70struct msi_chip {
69 struct module *owner; 71 struct module *owner;
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 95fc482cef36..36bb6a503f19 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -91,8 +91,6 @@ struct nand_bbt_descr {
91 * with NAND_BBT_CREATE. 91 * with NAND_BBT_CREATE.
92 */ 92 */
93#define NAND_BBT_CREATE_EMPTY 0x00000400 93#define NAND_BBT_CREATE_EMPTY 0x00000400
94/* Search good / bad pattern through all pages of a block */
95#define NAND_BBT_SCANALLPAGES 0x00000800
96/* Write bbt if neccecary */ 94/* Write bbt if neccecary */
97#define NAND_BBT_WRITE 0x00002000 95#define NAND_BBT_WRITE 0x00002000
98/* Read and write back block contents when writing bbt */ 96/* Read and write back block contents when writing bbt */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 4b02512e421c..5f487d776411 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -365,7 +365,7 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
365 bitpos = (map_bankwidth(map)-1-i)*8; 365 bitpos = (map_bankwidth(map)-1-i)*8;
366#endif 366#endif
367 orig.x[0] &= ~(0xff << bitpos); 367 orig.x[0] &= ~(0xff << bitpos);
368 orig.x[0] |= buf[i-start] << bitpos; 368 orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
369 } 369 }
370 } 370 }
371 return orig; 371 return orig;
@@ -384,7 +384,7 @@ static inline map_word map_word_ff(struct map_info *map)
384 384
385 if (map_bankwidth(map) < MAP_FF_LIMIT) { 385 if (map_bankwidth(map) < MAP_FF_LIMIT) {
386 int bw = 8 * map_bankwidth(map); 386 int bw = 8 * map_bankwidth(map);
387 r.x[0] = (1 << bw) - 1; 387 r.x[0] = (1UL << bw) - 1;
388 } else { 388 } else {
389 for (i=0; i<map_words(map); i++) 389 for (i=0; i<map_words(map); i++)
390 r.x[i] = ~0UL; 390 r.x[i] = ~0UL;
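
Both map.h changes are shift-width fixes: the shifted operand was previously promoted to a 32-bit int even though the result lands in an unsigned long, so high bit positions overflowed or were undefined. A quick standalone check of the corrected forms (assuming a 64-bit unsigned long for the printed values):

```c
#include <stdio.h>

int main(void)
{
    unsigned char buf = 0xab;
    int bitpos = 40;
    int bw = 32;

    /* The corrected forms from map.h: widen before shifting. */
    unsigned long word = (unsigned long)buf << bitpos;
    unsigned long ones = (1UL << bw) - 1;

    printf("word = 0x%lx\n", word);   /* 0xab0000000000 */
    printf("ones = 0x%lx\n", ones);   /* 0xffffffff */

    /* Without the casts, buf and 1 are promoted to (32-bit) int first, so
     * shifting a high byte up to bit 24 overflows the int and shift counts
     * of 32 or more are undefined -- exactly what the two fixes avoid. */
    return 0;
}
```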
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index f9bfe526d310..8cc0e2fb6894 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -29,9 +29,6 @@
29 29
30#include <asm/div64.h> 30#include <asm/div64.h>
31 31
32#define MTD_CHAR_MAJOR 90
33#define MTD_BLOCK_MAJOR 31
34
35#define MTD_ERASE_PENDING 0x01 32#define MTD_ERASE_PENDING 0x01
36#define MTD_ERASING 0x02 33#define MTD_ERASING 0x02
37#define MTD_ERASE_SUSPEND 0x04 34#define MTD_ERASE_SUSPEND 0x04
@@ -354,6 +351,11 @@ static inline int mtd_has_oob(const struct mtd_info *mtd)
354 return mtd->_read_oob && mtd->_write_oob; 351 return mtd->_read_oob && mtd->_write_oob;
355} 352}
356 353
354static inline int mtd_type_is_nand(const struct mtd_info *mtd)
355{
356 return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
357}
358
357static inline int mtd_can_have_bb(const struct mtd_info *mtd) 359static inline int mtd_can_have_bb(const struct mtd_info *mtd)
358{ 360{
359 return !!mtd->_block_isbad; 361 return !!mtd->_block_isbad;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ac8e89d5a792..9e6c8f9f306e 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -198,6 +198,7 @@ typedef enum {
198/* Cell info constants */ 198/* Cell info constants */
199#define NAND_CI_CHIPNR_MSK 0x03 199#define NAND_CI_CHIPNR_MSK 0x03
200#define NAND_CI_CELLTYPE_MSK 0x0C 200#define NAND_CI_CELLTYPE_MSK 0x0C
201#define NAND_CI_CELLTYPE_SHIFT 2
201 202
202/* Keep gcc happy */ 203/* Keep gcc happy */
203struct nand_chip; 204struct nand_chip;
@@ -477,7 +478,7 @@ struct nand_buffers {
477 * @badblockbits: [INTERN] minimum number of set bits in a good block's 478 * @badblockbits: [INTERN] minimum number of set bits in a good block's
478 * bad block marker position; i.e., BBM == 11110111b is 479 * bad block marker position; i.e., BBM == 11110111b is
479 * not bad when badblockbits == 7 480 * not bad when badblockbits == 7
480 * @cellinfo: [INTERN] MLC/multichip data from chip ident 481 * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
481 * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet. 482 * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet.
482 * Minimum amount of bit errors per @ecc_step_ds guaranteed 483 * Minimum amount of bit errors per @ecc_step_ds guaranteed
483 * to be correctable. If unknown, set to zero. 484 * to be correctable. If unknown, set to zero.
@@ -498,7 +499,6 @@ struct nand_buffers {
498 * supported, 0 otherwise. 499 * supported, 0 otherwise.
499 * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand 500 * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
500 * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand 501 * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
501 * @ecclayout: [REPLACEABLE] the default ECC placement scheme
502 * @bbt: [INTERN] bad block table pointer 502 * @bbt: [INTERN] bad block table pointer
503 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash 503 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
504 * lookup. 504 * lookup.
@@ -559,7 +559,7 @@ struct nand_chip {
559 int pagebuf; 559 int pagebuf;
560 unsigned int pagebuf_bitflips; 560 unsigned int pagebuf_bitflips;
561 int subpagesize; 561 int subpagesize;
562 uint8_t cellinfo; 562 uint8_t bits_per_cell;
563 uint16_t ecc_strength_ds; 563 uint16_t ecc_strength_ds;
564 uint16_t ecc_step_ds; 564 uint16_t ecc_step_ds;
565 int badblockpos; 565 int badblockpos;
@@ -572,7 +572,6 @@ struct nand_chip {
572 572
573 uint8_t *oob_poi; 573 uint8_t *oob_poi;
574 struct nand_hw_control *controller; 574 struct nand_hw_control *controller;
575 struct nand_ecclayout *ecclayout;
576 575
577 struct nand_ecc_ctrl ecc; 576 struct nand_ecc_ctrl ecc;
578 struct nand_buffers *buffers; 577 struct nand_buffers *buffers;
@@ -797,4 +796,13 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
797 return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); 796 return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
798} 797}
799 798
799/*
800 * Check if it is a SLC nand.
801 * The !nand_is_slc() can be used to check the MLC/TLC nand chips.
802 * We do not distinguish the MLC and TLC now.
803 */
804static inline bool nand_is_slc(struct nand_chip *chip)
805{
806 return chip->bits_per_cell == 1;
807}
800#endif /* __LINUX_MTD_NAND_H */ 808#endif /* __LINUX_MTD_NAND_H */
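
nand_is_slc() only checks the new bits_per_cell field; the NAND core fills that field from the cell-type bits of the extended ID byte, which is what the new NAND_CI_CELLTYPE_SHIFT above is for. A rough standalone sketch of that decode (not the driver function itself):

```c
#include <stdio.h>
#include <stdbool.h>

#define NAND_CI_CELLTYPE_MSK   0x0C
#define NAND_CI_CELLTYPE_SHIFT 2

/* Cell-type bits 0 -> SLC (1 bit/cell), 1 -> 2 bits/cell (MLC), and so on. */
static unsigned int bits_per_cell(unsigned char cellinfo)
{
    return ((cellinfo & NAND_CI_CELLTYPE_MSK) >> NAND_CI_CELLTYPE_SHIFT) + 1;
}

static bool is_slc(unsigned char cellinfo)
{
    return bits_per_cell(cellinfo) == 1;     /* mirrors nand_is_slc() */
}

int main(void)
{
    printf("cellinfo 0x00 -> %u bit(s)/cell, SLC=%d\n", bits_per_cell(0x00), is_slc(0x00));
    printf("cellinfo 0x04 -> %u bit(s)/cell, SLC=%d\n", bits_per_cell(0x04), is_slc(0x04));
    return 0;
}
```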
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bab49da8a0f0..d3181936c138 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -131,7 +131,7 @@ static inline int mutex_is_locked(struct mutex *lock)
131} 131}
132 132
133/* 133/*
134 * See kernel/mutex.c for detailed documentation of these APIs. 134 * See kernel/locking/mutex.c for detailed documentation of these APIs.
135 * Also see Documentation/mutex-design.txt. 135 * Also see Documentation/mutex-design.txt.
136 */ 136 */
137#ifdef CONFIG_DEBUG_LOCK_ALLOC 137#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 8e47bc7a1665..492de72560fa 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -16,7 +16,7 @@ struct nameidata {
16 struct path root; 16 struct path root;
17 struct inode *inode; /* path.dentry.d_inode */ 17 struct inode *inode; /* path.dentry.d_inode */
18 unsigned int flags; 18 unsigned int flags;
19 unsigned seq; 19 unsigned seq, m_seq;
20 int last_type; 20 int last_type;
21 unsigned depth; 21 unsigned depth;
22 char *saved_names[MAX_NESTED_LINKS + 1]; 22 char *saved_names[MAX_NESTED_LINKS + 1];
diff --git a/include/linux/net.h b/include/linux/net.h
index 4f27575ce1d6..69be3e6079c8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,6 +24,7 @@
24#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ 24#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
25#include <linux/kmemcheck.h> 25#include <linux/kmemcheck.h>
26#include <linux/rcupdate.h> 26#include <linux/rcupdate.h>
27#include <linux/jump_label.h>
27#include <uapi/linux/net.h> 28#include <uapi/linux/net.h>
28 29
29struct poll_table_struct; 30struct poll_table_struct;
@@ -163,6 +164,14 @@ struct proto_ops {
163#endif 164#endif
164 int (*sendmsg) (struct kiocb *iocb, struct socket *sock, 165 int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
165 struct msghdr *m, size_t total_len); 166 struct msghdr *m, size_t total_len);
167 /* Notes for implementing recvmsg:
168 * ===============================
169 * msg->msg_namelen should get updated by the recvmsg handlers
170 * iff msg_name != NULL. It is by default 0 to prevent
171 * returning uninitialized memory to user space. The recvfrom
172 * handlers can assume that msg.msg_name is either NULL or has
173 * a minimum size of sizeof(struct sockaddr_storage).
174 */
166 int (*recvmsg) (struct kiocb *iocb, struct socket *sock, 175 int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
167 struct msghdr *m, size_t total_len, 176 struct msghdr *m, size_t total_len,
168 int flags); 177 int flags);
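
The note above spells out the contract for proto_ops->recvmsg implementations: msg_namelen stays 0 unless msg_name was supplied and an address was actually written into it (a buffer of at least sockaddr_storage size may be assumed), so uninitialized kernel memory is never reported back to user space. A userspace-flavoured analogy of that "report a length only if you wrote an address" rule:

```c
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Analogy only: the length defaults to 0 and is set iff the caller
 * passed a name buffer and an address was written into it. */
static size_t toy_recv_from(struct sockaddr_storage *name)
{
    size_t namelen = 0;

    if (name) {
        struct sockaddr_in *sin = (struct sockaddr_in *)name;
        memset(sin, 0, sizeof(*sin));
        sin->sin_family = AF_INET;
        sin->sin_port   = htons(4242);
        namelen = sizeof(*sin);
    }
    return namelen;
}

int main(void)
{
    struct sockaddr_storage ss;

    printf("with name:    namelen=%zu\n", toy_recv_from(&ss));
    printf("without name: namelen=%zu\n", toy_recv_from(NULL));
    return 0;
}
```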
@@ -172,7 +181,7 @@ struct proto_ops {
172 int offset, size_t size, int flags); 181 int offset, size_t size, int flags);
173 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, 182 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
174 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 183 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
175 void (*set_peek_off)(struct sock *sk, int val); 184 int (*set_peek_off)(struct sock *sk, int val);
176}; 185};
177 186
178#define DECLARE_SOCKADDR(type, dst, src) \ 187#define DECLARE_SOCKADDR(type, dst, src) \
@@ -195,27 +204,23 @@ enum {
195 SOCK_WAKE_URG, 204 SOCK_WAKE_URG,
196}; 205};
197 206
198extern int sock_wake_async(struct socket *sk, int how, int band); 207int sock_wake_async(struct socket *sk, int how, int band);
199extern int sock_register(const struct net_proto_family *fam); 208int sock_register(const struct net_proto_family *fam);
200extern void sock_unregister(int family); 209void sock_unregister(int family);
201extern int __sock_create(struct net *net, int family, int type, int proto, 210int __sock_create(struct net *net, int family, int type, int proto,
202 struct socket **res, int kern); 211 struct socket **res, int kern);
203extern int sock_create(int family, int type, int proto, 212int sock_create(int family, int type, int proto, struct socket **res);
204 struct socket **res); 213int sock_create_kern(int family, int type, int proto, struct socket **res);
205extern int sock_create_kern(int family, int type, int proto, 214int sock_create_lite(int family, int type, int proto, struct socket **res);
206 struct socket **res); 215void sock_release(struct socket *sock);
207extern int sock_create_lite(int family, int type, int proto, 216int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
208 struct socket **res); 217int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
209extern void sock_release(struct socket *sock); 218 int flags);
210extern int sock_sendmsg(struct socket *sock, struct msghdr *msg, 219struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
211 size_t len); 220struct socket *sockfd_lookup(int fd, int *err);
212extern int sock_recvmsg(struct socket *sock, struct msghdr *msg, 221struct socket *sock_from_file(struct file *file, int *err);
213 size_t size, int flags);
214extern struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
215extern struct socket *sockfd_lookup(int fd, int *err);
216extern struct socket *sock_from_file(struct file *file, int *err);
217#define sockfd_put(sock) fput(sock->file) 222#define sockfd_put(sock) fput(sock->file)
218extern int net_ratelimit(void); 223int net_ratelimit(void);
219 224
220#define net_ratelimited_function(function, ...) \ 225#define net_ratelimited_function(function, ...) \
221do { \ 226do { \
@@ -243,32 +248,52 @@ do { \
243#define net_random() prandom_u32() 248#define net_random() prandom_u32()
244#define net_srandom(seed) prandom_seed((__force u32)(seed)) 249#define net_srandom(seed) prandom_seed((__force u32)(seed))
245 250
246extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 251bool __net_get_random_once(void *buf, int nbytes, bool *done,
247 struct kvec *vec, size_t num, size_t len); 252 struct static_key *done_key);
248extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg, 253
249 struct kvec *vec, size_t num, 254#ifdef HAVE_JUMP_LABEL
250 size_t len, int flags); 255#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
251 256 { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
252extern int kernel_bind(struct socket *sock, struct sockaddr *addr, 257#else /* !HAVE_JUMP_LABEL */
253 int addrlen); 258#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
254extern int kernel_listen(struct socket *sock, int backlog); 259#endif /* HAVE_JUMP_LABEL */
255extern int kernel_accept(struct socket *sock, struct socket **newsock, 260
256 int flags); 261#define net_get_random_once(buf, nbytes) \
257extern int kernel_connect(struct socket *sock, struct sockaddr *addr, 262 ({ \
258 int addrlen, int flags); 263 bool ___ret = false; \
259extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr, 264 static bool ___done = false; \
260 int *addrlen); 265 static struct static_key ___done_key = \
261extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr, 266 ___NET_RANDOM_STATIC_KEY_INIT; \
262 int *addrlen); 267 if (!static_key_true(&___done_key)) \
263extern int kernel_getsockopt(struct socket *sock, int level, int optname, 268 ___ret = __net_get_random_once(buf, \
264 char *optval, int *optlen); 269 nbytes, \
265extern int kernel_setsockopt(struct socket *sock, int level, int optname, 270 &___done, \
266 char *optval, unsigned int optlen); 271 &___done_key); \
267extern int kernel_sendpage(struct socket *sock, struct page *page, int offset, 272 ___ret; \
268 size_t size, int flags); 273 })
269extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); 274
270extern int kernel_sock_shutdown(struct socket *sock, 275int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
271 enum sock_shutdown_cmd how); 276 size_t num, size_t len);
277int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
278 size_t num, size_t len, int flags);
279
280int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
281int kernel_listen(struct socket *sock, int backlog);
282int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
283int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
284 int flags);
285int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
286 int *addrlen);
287int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
288 int *addrlen);
289int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
290 int *optlen);
291int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
292 unsigned int optlen);
293int kernel_sendpage(struct socket *sock, struct page *page, int offset,
294 size_t size, int flags);
295int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
296int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
272 297
273#define MODULE_ALIAS_NETPROTO(proto) \ 298#define MODULE_ALIAS_NETPROTO(proto) \
274 MODULE_ALIAS("net-pf-" __stringify(proto)) 299 MODULE_ALIAS("net-pf-" __stringify(proto))
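
For context on the new net_get_random_once() macro in the hunk above (illustrative, not part of this patch): it fills a buffer with random bytes exactly once per boot, using a static key so the already-initialised fast path stays cheap. The intended usage pattern looks roughly like the sketch below; hash_secret and my_hashfn are made-up names, and <linux/jhash.h> is assumed for jhash():

    static u32 hash_secret __read_mostly;

    static u32 my_hashfn(const void *data, unsigned int len)
    {
            /* fills hash_secret with entropy on the first call only */
            net_get_random_once(&hash_secret, sizeof(hash_secret));
            return jhash(data, len, hash_secret);
    }
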
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index a2a89a5c7be5..1005ebf17575 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -42,6 +42,8 @@ enum {
42 NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ 42 NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
43 NETIF_F_FSO_BIT, /* ... FCoE segmentation */ 43 NETIF_F_FSO_BIT, /* ... FCoE segmentation */
44 NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ 44 NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */
45 NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */
46 NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
45 NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ 47 NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
46 NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ 48 NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */
47 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ 49 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
@@ -60,6 +62,7 @@ enum {
60 NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */ 62 NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */
61 NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ 63 NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */
62 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ 64 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
65 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
63 66
64 /* 67 /*
65 * Add your fresh new feature above and remember to update 68 * Add your fresh new feature above and remember to update
@@ -107,11 +110,14 @@ enum {
107#define NETIF_F_RXFCS __NETIF_F(RXFCS) 110#define NETIF_F_RXFCS __NETIF_F(RXFCS)
108#define NETIF_F_RXALL __NETIF_F(RXALL) 111#define NETIF_F_RXALL __NETIF_F(RXALL)
109#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) 112#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
113#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
114#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
110#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) 115#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
111#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) 116#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
112#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) 117#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
113#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 118#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
114#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 119#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
120#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
115 121
116/* Features valid for ethtool to change */ 122/* Features valid for ethtool to change */
117/* = all defined minus driver/device-class-related */ 123/* = all defined minus driver/device-class-related */
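
Context for the new feature bits (not part of this patch): a driver whose hardware can segment IPIP- or SIT-encapsulated TCP would advertise the new flags alongside the existing tunnel GSO flags. A hypothetical probe fragment:

    netdev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_IPIP |
                           NETIF_F_GSO_SIT | NETIF_F_GSO_UDP_TUNNEL;
    netdev->features   |= netdev->hw_features;
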
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 25f5d2d11e7c..ce2a1f5f9a1e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,8 +60,8 @@ struct wireless_dev;
60#define SET_ETHTOOL_OPS(netdev,ops) \ 60#define SET_ETHTOOL_OPS(netdev,ops) \
61 ( (netdev)->ethtool_ops = (ops) ) 61 ( (netdev)->ethtool_ops = (ops) )
62 62
63extern void netdev_set_default_ethtool_ops(struct net_device *dev, 63void netdev_set_default_ethtool_ops(struct net_device *dev,
64 const struct ethtool_ops *ops); 64 const struct ethtool_ops *ops);
65 65
66/* hardware address assignment types */ 66/* hardware address assignment types */
67#define NET_ADDR_PERM 0 /* address is permanent (default) */ 67#define NET_ADDR_PERM 0 /* address is permanent (default) */
@@ -298,7 +298,7 @@ struct netdev_boot_setup {
298}; 298};
299#define NETDEV_BOOT_SETUP_MAX 8 299#define NETDEV_BOOT_SETUP_MAX 8
300 300
301extern int __init netdev_boot_setup(char *str); 301int __init netdev_boot_setup(char *str);
302 302
303/* 303/*
304 * Structure for NAPI scheduling similar to tasklet but with weighting 304 * Structure for NAPI scheduling similar to tasklet but with weighting
@@ -394,7 +394,7 @@ enum rx_handler_result {
394typedef enum rx_handler_result rx_handler_result_t; 394typedef enum rx_handler_result rx_handler_result_t;
395typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); 395typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
396 396
397extern void __napi_schedule(struct napi_struct *n); 397void __napi_schedule(struct napi_struct *n);
398 398
399static inline bool napi_disable_pending(struct napi_struct *n) 399static inline bool napi_disable_pending(struct napi_struct *n)
400{ 400{
@@ -445,8 +445,8 @@ static inline bool napi_reschedule(struct napi_struct *napi)
445 * 445 *
446 * Mark NAPI processing as complete. 446 * Mark NAPI processing as complete.
447 */ 447 */
448extern void __napi_complete(struct napi_struct *n); 448void __napi_complete(struct napi_struct *n);
449extern void napi_complete(struct napi_struct *n); 449void napi_complete(struct napi_struct *n);
450 450
451/** 451/**
452 * napi_by_id - lookup a NAPI by napi_id 452 * napi_by_id - lookup a NAPI by napi_id
@@ -455,7 +455,7 @@ extern void napi_complete(struct napi_struct *n);
455 * lookup @napi_id in napi_hash table 455 * lookup @napi_id in napi_hash table
456 * must be called under rcu_read_lock() 456 * must be called under rcu_read_lock()
457 */ 457 */
458extern struct napi_struct *napi_by_id(unsigned int napi_id); 458struct napi_struct *napi_by_id(unsigned int napi_id);
459 459
460/** 460/**
461 * napi_hash_add - add a NAPI to global hashtable 461 * napi_hash_add - add a NAPI to global hashtable
@@ -463,7 +463,7 @@ extern struct napi_struct *napi_by_id(unsigned int napi_id);
463 * 463 *
464 * generate a new napi_id and store a @napi under it in napi_hash 464 * generate a new napi_id and store a @napi under it in napi_hash
465 */ 465 */
466extern void napi_hash_add(struct napi_struct *napi); 466void napi_hash_add(struct napi_struct *napi);
467 467
468/** 468/**
469 * napi_hash_del - remove a NAPI from global table 469 * napi_hash_del - remove a NAPI from global table
@@ -472,7 +472,7 @@ extern void napi_hash_add(struct napi_struct *napi);
472 * Warning: caller must observe rcu grace period 472 * Warning: caller must observe rcu grace period
473 * before freeing memory containing @napi 473 * before freeing memory containing @napi
474 */ 474 */
475extern void napi_hash_del(struct napi_struct *napi); 475void napi_hash_del(struct napi_struct *napi);
476 476
477/** 477/**
478 * napi_disable - prevent NAPI from scheduling 478 * napi_disable - prevent NAPI from scheduling
@@ -483,6 +483,7 @@ extern void napi_hash_del(struct napi_struct *napi);
483 */ 483 */
484static inline void napi_disable(struct napi_struct *n) 484static inline void napi_disable(struct napi_struct *n)
485{ 485{
486 might_sleep();
486 set_bit(NAPI_STATE_DISABLE, &n->state); 487 set_bit(NAPI_STATE_DISABLE, &n->state);
487 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 488 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
488 msleep(1); 489 msleep(1);
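
The might_sleep() added above documents (and, with CONFIG_DEBUG_ATOMIC_SLEEP, enforces) that napi_disable() can sleep in the msleep() loop, so it must be called from process context with no spinlocks held. A sketch of the usual call site; my_priv and my_ndo_stop are illustrative names only:

    static int my_ndo_stop(struct net_device *dev)
    {
            struct my_priv *priv = netdev_priv(dev);

            napi_disable(&priv->napi);      /* may sleep */
            netif_tx_disable(dev);
            return 0;
    }
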
@@ -664,8 +665,8 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
664extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; 665extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
665 666
666#ifdef CONFIG_RFS_ACCEL 667#ifdef CONFIG_RFS_ACCEL
667extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 668bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
668 u32 flow_id, u16 filter_id); 669 u16 filter_id);
669#endif 670#endif
670 671
671/* This structure contains an instance of an RX queue. */ 672/* This structure contains an instance of an RX queue. */
@@ -768,7 +769,8 @@ struct netdev_phys_port_id {
768 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) 769 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
769 * Required can not be NULL. 770 * Required can not be NULL.
770 * 771 *
771 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); 772 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
773 * void *accel_priv);
772 * Called to decide which queue to when device supports multiple 774 * Called to decide which queue to when device supports multiple
773 * transmit queues. 775 * transmit queues.
774 * 776 *
@@ -961,6 +963,25 @@ struct netdev_phys_port_id {
961 * Called by vxlan to notify the driver about a UDP port and socket 963 * Called by vxlan to notify the driver about a UDP port and socket
962 * address family that vxlan is not listening to anymore. The operation 964 * address family that vxlan is not listening to anymore. The operation
963 * is protected by the vxlan_net->sock_lock. 965 * is protected by the vxlan_net->sock_lock.
966 *
967 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
968 * struct net_device *dev)
969 * Called by upper layer devices to accelerate switching or other
970 * station functionality into hardware. 'pdev is the lowerdev
971 * to use for the offload and 'dev' is the net device that will
972 * back the offload. Returns a pointer to the private structure
973 * the upper layer will maintain.
974 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
975 * Called by upper layer device to delete the station created
976 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
977 * the station and priv is the structure returned by the add
978 * operation.
979 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
980 * struct net_device *dev,
981 * void *priv);
982 * Callback to use for xmit over the accelerated station. This
983 * is used in place of ndo_start_xmit on accelerated net
984 * devices.
964 */ 985 */
965struct net_device_ops { 986struct net_device_ops {
966 int (*ndo_init)(struct net_device *dev); 987 int (*ndo_init)(struct net_device *dev);
@@ -970,7 +991,8 @@ struct net_device_ops {
970 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, 991 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
971 struct net_device *dev); 992 struct net_device *dev);
972 u16 (*ndo_select_queue)(struct net_device *dev, 993 u16 (*ndo_select_queue)(struct net_device *dev,
973 struct sk_buff *skb); 994 struct sk_buff *skb,
995 void *accel_priv);
974 void (*ndo_change_rx_flags)(struct net_device *dev, 996 void (*ndo_change_rx_flags)(struct net_device *dev,
975 int flags); 997 int flags);
976 void (*ndo_set_rx_mode)(struct net_device *dev); 998 void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1097,6 +1119,15 @@ struct net_device_ops {
1097 void (*ndo_del_vxlan_port)(struct net_device *dev, 1119 void (*ndo_del_vxlan_port)(struct net_device *dev,
1098 sa_family_t sa_family, 1120 sa_family_t sa_family,
1099 __be16 port); 1121 __be16 port);
1122
1123 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1124 struct net_device *dev);
1125 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1126 void *priv);
1127
1128 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1129 struct net_device *dev,
1130 void *priv);
1100}; 1131};
1101 1132
1102/* 1133/*
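
Putting the two hunks above together (illustrative only, not from this patch): as implied by the ndo_dfwd_* documentation, ndo_select_queue() now receives the accel_priv cookie returned by ndo_dfwd_add_station(), so a driver with L2 forwarding offload can steer a station's traffic to that station's queues, while a driver without the offload ignores the argument. __netdev_pick_tx() and dev_queue_xmit_accel() are declared further down in this header; my_select_queue, sta_queue_index, lowerdev and fwd_priv are made-up names:

    static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv)
    {
            if (accel_priv)         /* came in via dev_queue_xmit_accel() */
                    return sta_queue_index(accel_priv, skb);
            return __netdev_pick_tx(dev, skb);
    }

    /* ...and on the upper device's transmit path the stored priv is handed
     * back so the lower driver sees it in ndo_select_queue()/ndo_dfwd_start_xmit(): */
    skb->dev = lowerdev;
    err = fwd_priv ? dev_queue_xmit_accel(skb, fwd_priv)
                   : dev_queue_xmit(skb);
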
@@ -1131,7 +1162,7 @@ struct net_device {
1131 unsigned long mem_end; /* shared mem end */ 1162 unsigned long mem_end; /* shared mem end */
1132 unsigned long mem_start; /* shared mem start */ 1163 unsigned long mem_start; /* shared mem start */
1133 unsigned long base_addr; /* device I/O address */ 1164 unsigned long base_addr; /* device I/O address */
1134 unsigned int irq; /* device IRQ number */ 1165 int irq; /* device IRQ number */
1135 1166
1136 /* 1167 /*
1137 * Some hardware also needs these fields, but they are not 1168 * Some hardware also needs these fields, but they are not
@@ -1143,8 +1174,19 @@ struct net_device {
1143 struct list_head dev_list; 1174 struct list_head dev_list;
1144 struct list_head napi_list; 1175 struct list_head napi_list;
1145 struct list_head unreg_list; 1176 struct list_head unreg_list;
1146 struct list_head upper_dev_list; /* List of upper devices */ 1177 struct list_head close_list;
1147 struct list_head lower_dev_list; 1178
1179 /* directly linked devices, like slaves for bonding */
1180 struct {
1181 struct list_head upper;
1182 struct list_head lower;
1183 } adj_list;
1184
1185 /* all linked devices, *including* neighbours */
1186 struct {
1187 struct list_head upper;
1188 struct list_head lower;
1189 } all_adj_list;
1148 1190
1149 1191
1150 /* currently active device features */ 1192 /* currently active device features */
@@ -1183,6 +1225,7 @@ struct net_device {
1183 /* Management operations */ 1225 /* Management operations */
1184 const struct net_device_ops *netdev_ops; 1226 const struct net_device_ops *netdev_ops;
1185 const struct ethtool_ops *ethtool_ops; 1227 const struct ethtool_ops *ethtool_ops;
1228 const struct forwarding_accel_ops *fwd_ops;
1186 1229
1187 /* Hardware header description */ 1230 /* Hardware header description */
1188 const struct header_ops *header_ops; 1231 const struct header_ops *header_ops;
@@ -1214,7 +1257,7 @@ struct net_device {
1214 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 1257 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
1215 unsigned char addr_assign_type; /* hw address assignment type */ 1258 unsigned char addr_assign_type; /* hw address assignment type */
1216 unsigned char addr_len; /* hardware address length */ 1259 unsigned char addr_len; /* hardware address length */
1217 unsigned char neigh_priv_len; 1260 unsigned short neigh_priv_len;
1218 unsigned short dev_id; /* Used to differentiate devices 1261 unsigned short dev_id; /* Used to differentiate devices
1219 * that share the same link 1262 * that share the same link
1220 * layer address 1263 * layer address
@@ -1487,9 +1530,10 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
1487 f(dev, &dev->_tx[i], arg); 1530 f(dev, &dev->_tx[i], arg);
1488} 1531}
1489 1532
1490extern struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1533struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1491 struct sk_buff *skb); 1534 struct sk_buff *skb,
1492extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); 1535 void *accel_priv);
1536u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
1493 1537
1494/* 1538/*
1495 * Net namespace inlines 1539 * Net namespace inlines
@@ -1546,7 +1590,7 @@ static inline void *netdev_priv(const struct net_device *dev)
1546#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 1590#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1547 1591
1548/* Set the sysfs device type for the network logical device to allow 1592/* Set the sysfs device type for the network logical device to allow
1549 * fin grained indentification of different network device types. For 1593 * fine-grained identification of different network device types. For
1550 * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. 1594 * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
1551 */ 1595 */
1552#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 1596#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
@@ -1673,8 +1717,8 @@ struct packet_offload {
1673#define NETDEV_CHANGEUPPER 0x0015 1717#define NETDEV_CHANGEUPPER 0x0015
1674#define NETDEV_RESEND_IGMP 0x0016 1718#define NETDEV_RESEND_IGMP 0x0016
1675 1719
1676extern int register_netdevice_notifier(struct notifier_block *nb); 1720int register_netdevice_notifier(struct notifier_block *nb);
1677extern int unregister_netdevice_notifier(struct notifier_block *nb); 1721int unregister_netdevice_notifier(struct notifier_block *nb);
1678 1722
1679struct netdev_notifier_info { 1723struct netdev_notifier_info {
1680 struct net_device *dev; 1724 struct net_device *dev;
@@ -1697,9 +1741,9 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
1697 return info->dev; 1741 return info->dev;
1698} 1742}
1699 1743
1700extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, 1744int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1701 struct netdev_notifier_info *info); 1745 struct netdev_notifier_info *info);
1702extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 1746int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1703 1747
1704 1748
1705extern rwlock_t dev_base_lock; /* Device list lock */ 1749extern rwlock_t dev_base_lock; /* Device list lock */
@@ -1754,54 +1798,54 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
1754 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1798 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1755} 1799}
1756 1800
1757extern int netdev_boot_setup_check(struct net_device *dev); 1801int netdev_boot_setup_check(struct net_device *dev);
1758extern unsigned long netdev_boot_base(const char *prefix, int unit); 1802unsigned long netdev_boot_base(const char *prefix, int unit);
1759extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 1803struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1760 const char *hwaddr); 1804 const char *hwaddr);
1761extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 1805struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1762extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); 1806struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1763extern void dev_add_pack(struct packet_type *pt); 1807void dev_add_pack(struct packet_type *pt);
1764extern void dev_remove_pack(struct packet_type *pt); 1808void dev_remove_pack(struct packet_type *pt);
1765extern void __dev_remove_pack(struct packet_type *pt); 1809void __dev_remove_pack(struct packet_type *pt);
1766extern void dev_add_offload(struct packet_offload *po); 1810void dev_add_offload(struct packet_offload *po);
1767extern void dev_remove_offload(struct packet_offload *po); 1811void dev_remove_offload(struct packet_offload *po);
1768extern void __dev_remove_offload(struct packet_offload *po); 1812void __dev_remove_offload(struct packet_offload *po);
1769 1813
1770extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, 1814struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1771 unsigned short mask); 1815 unsigned short mask);
1772extern struct net_device *dev_get_by_name(struct net *net, const char *name); 1816struct net_device *dev_get_by_name(struct net *net, const char *name);
1773extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 1817struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
1774extern struct net_device *__dev_get_by_name(struct net *net, const char *name); 1818struct net_device *__dev_get_by_name(struct net *net, const char *name);
1775extern int dev_alloc_name(struct net_device *dev, const char *name); 1819int dev_alloc_name(struct net_device *dev, const char *name);
1776extern int dev_open(struct net_device *dev); 1820int dev_open(struct net_device *dev);
1777extern int dev_close(struct net_device *dev); 1821int dev_close(struct net_device *dev);
1778extern void dev_disable_lro(struct net_device *dev); 1822void dev_disable_lro(struct net_device *dev);
1779extern int dev_loopback_xmit(struct sk_buff *newskb); 1823int dev_loopback_xmit(struct sk_buff *newskb);
1780extern int dev_queue_xmit(struct sk_buff *skb); 1824int dev_queue_xmit(struct sk_buff *skb);
1781extern int register_netdevice(struct net_device *dev); 1825int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
1782extern void unregister_netdevice_queue(struct net_device *dev, 1826int register_netdevice(struct net_device *dev);
1783 struct list_head *head); 1827void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
1784extern void unregister_netdevice_many(struct list_head *head); 1828void unregister_netdevice_many(struct list_head *head);
1785static inline void unregister_netdevice(struct net_device *dev) 1829static inline void unregister_netdevice(struct net_device *dev)
1786{ 1830{
1787 unregister_netdevice_queue(dev, NULL); 1831 unregister_netdevice_queue(dev, NULL);
1788} 1832}
1789 1833
1790extern int netdev_refcnt_read(const struct net_device *dev); 1834int netdev_refcnt_read(const struct net_device *dev);
1791extern void free_netdev(struct net_device *dev); 1835void free_netdev(struct net_device *dev);
1792extern void synchronize_net(void); 1836void netdev_freemem(struct net_device *dev);
1793extern int init_dummy_netdev(struct net_device *dev); 1837void synchronize_net(void);
1838int init_dummy_netdev(struct net_device *dev);
1794 1839
1795extern struct net_device *dev_get_by_index(struct net *net, int ifindex); 1840struct net_device *dev_get_by_index(struct net *net, int ifindex);
1796extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); 1841struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1797extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 1842struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1798extern int netdev_get_name(struct net *net, char *name, int ifindex); 1843int netdev_get_name(struct net *net, char *name, int ifindex);
1799extern int dev_restart(struct net_device *dev); 1844int dev_restart(struct net_device *dev);
1800#ifdef CONFIG_NETPOLL_TRAP 1845#ifdef CONFIG_NETPOLL_TRAP
1801extern int netpoll_trap(void); 1846int netpoll_trap(void);
1802#endif 1847#endif
1803extern int skb_gro_receive(struct sk_buff **head, 1848int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1804 struct sk_buff *skb);
1805 1849
1806static inline unsigned int skb_gro_offset(const struct sk_buff *skb) 1850static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1807{ 1851{
@@ -1872,8 +1916,17 @@ static inline int dev_parse_header(const struct sk_buff *skb,
1872 return dev->header_ops->parse(skb, haddr); 1916 return dev->header_ops->parse(skb, haddr);
1873} 1917}
1874 1918
1919static inline int dev_rebuild_header(struct sk_buff *skb)
1920{
1921 const struct net_device *dev = skb->dev;
1922
1923 if (!dev->header_ops || !dev->header_ops->rebuild)
1924 return 0;
1925 return dev->header_ops->rebuild(skb);
1926}
1927
1875typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 1928typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1876extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); 1929int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
1877static inline int unregister_gifconf(unsigned int family) 1930static inline int unregister_gifconf(unsigned int family)
1878{ 1931{
1879 return register_gifconf(family, NULL); 1932 return register_gifconf(family, NULL);
@@ -1944,7 +1997,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1944 1997
1945DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 1998DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1946 1999
1947extern void __netif_schedule(struct Qdisc *q); 2000void __netif_schedule(struct Qdisc *q);
1948 2001
1949static inline void netif_schedule_queue(struct netdev_queue *txq) 2002static inline void netif_schedule_queue(struct netdev_queue *txq)
1950{ 2003{
@@ -2264,9 +2317,8 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2264} 2317}
2265 2318
2266#ifdef CONFIG_XPS 2319#ifdef CONFIG_XPS
2267extern int netif_set_xps_queue(struct net_device *dev, 2320int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2268 const struct cpumask *mask, 2321 u16 index);
2269 u16 index);
2270#else 2322#else
2271static inline int netif_set_xps_queue(struct net_device *dev, 2323static inline int netif_set_xps_queue(struct net_device *dev,
2272 const struct cpumask *mask, 2324 const struct cpumask *mask,
@@ -2297,12 +2349,10 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
2297 return dev->num_tx_queues > 1; 2349 return dev->num_tx_queues > 1;
2298} 2350}
2299 2351
2300extern int netif_set_real_num_tx_queues(struct net_device *dev, 2352int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
2301 unsigned int txq);
2302 2353
2303#ifdef CONFIG_RPS 2354#ifdef CONFIG_RPS
2304extern int netif_set_real_num_rx_queues(struct net_device *dev, 2355int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
2305 unsigned int rxq);
2306#else 2356#else
2307static inline int netif_set_real_num_rx_queues(struct net_device *dev, 2357static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2308 unsigned int rxq) 2358 unsigned int rxq)
@@ -2329,28 +2379,27 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2329} 2379}
2330 2380
2331#define DEFAULT_MAX_NUM_RSS_QUEUES (8) 2381#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2332extern int netif_get_num_default_rss_queues(void); 2382int netif_get_num_default_rss_queues(void);
2333 2383
2334/* Use this variant when it is known for sure that it 2384/* Use this variant when it is known for sure that it
2335 * is executing from hardware interrupt context or with hardware interrupts 2385 * is executing from hardware interrupt context or with hardware interrupts
2336 * disabled. 2386 * disabled.
2337 */ 2387 */
2338extern void dev_kfree_skb_irq(struct sk_buff *skb); 2388void dev_kfree_skb_irq(struct sk_buff *skb);
2339 2389
2340/* Use this variant in places where it could be invoked 2390/* Use this variant in places where it could be invoked
2341 * from either hardware interrupt or other context, with hardware interrupts 2391 * from either hardware interrupt or other context, with hardware interrupts
2342 * either disabled or enabled. 2392 * either disabled or enabled.
2343 */ 2393 */
2344extern void dev_kfree_skb_any(struct sk_buff *skb); 2394void dev_kfree_skb_any(struct sk_buff *skb);
2345 2395
2346extern int netif_rx(struct sk_buff *skb); 2396int netif_rx(struct sk_buff *skb);
2347extern int netif_rx_ni(struct sk_buff *skb); 2397int netif_rx_ni(struct sk_buff *skb);
2348extern int netif_receive_skb(struct sk_buff *skb); 2398int netif_receive_skb(struct sk_buff *skb);
2349extern gro_result_t napi_gro_receive(struct napi_struct *napi, 2399gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
2350 struct sk_buff *skb); 2400void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2351extern void napi_gro_flush(struct napi_struct *napi, bool flush_old); 2401struct sk_buff *napi_get_frags(struct napi_struct *napi);
2352extern struct sk_buff * napi_get_frags(struct napi_struct *napi); 2402gro_result_t napi_gro_frags(struct napi_struct *napi);
2353extern gro_result_t napi_gro_frags(struct napi_struct *napi);
2354 2403
2355static inline void napi_free_frags(struct napi_struct *napi) 2404static inline void napi_free_frags(struct napi_struct *napi)
2356{ 2405{
@@ -2358,40 +2407,36 @@ static inline void napi_free_frags(struct napi_struct *napi)
2358 napi->skb = NULL; 2407 napi->skb = NULL;
2359} 2408}
2360 2409
2361extern int netdev_rx_handler_register(struct net_device *dev, 2410int netdev_rx_handler_register(struct net_device *dev,
2362 rx_handler_func_t *rx_handler, 2411 rx_handler_func_t *rx_handler,
2363 void *rx_handler_data); 2412 void *rx_handler_data);
2364extern void netdev_rx_handler_unregister(struct net_device *dev); 2413void netdev_rx_handler_unregister(struct net_device *dev);
2365 2414
2366extern bool dev_valid_name(const char *name); 2415bool dev_valid_name(const char *name);
2367extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); 2416int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2368extern int dev_ethtool(struct net *net, struct ifreq *); 2417int dev_ethtool(struct net *net, struct ifreq *);
2369extern unsigned int dev_get_flags(const struct net_device *); 2418unsigned int dev_get_flags(const struct net_device *);
2370extern int __dev_change_flags(struct net_device *, unsigned int flags); 2419int __dev_change_flags(struct net_device *, unsigned int flags);
2371extern int dev_change_flags(struct net_device *, unsigned int); 2420int dev_change_flags(struct net_device *, unsigned int);
2372extern void __dev_notify_flags(struct net_device *, unsigned int old_flags); 2421void __dev_notify_flags(struct net_device *, unsigned int old_flags,
2373extern int dev_change_name(struct net_device *, const char *); 2422 unsigned int gchanges);
2374extern int dev_set_alias(struct net_device *, const char *, size_t); 2423int dev_change_name(struct net_device *, const char *);
2375extern int dev_change_net_namespace(struct net_device *, 2424int dev_set_alias(struct net_device *, const char *, size_t);
2376 struct net *, const char *); 2425int dev_change_net_namespace(struct net_device *, struct net *, const char *);
2377extern int dev_set_mtu(struct net_device *, int); 2426int dev_set_mtu(struct net_device *, int);
2378extern void dev_set_group(struct net_device *, int); 2427void dev_set_group(struct net_device *, int);
2379extern int dev_set_mac_address(struct net_device *, 2428int dev_set_mac_address(struct net_device *, struct sockaddr *);
2380 struct sockaddr *); 2429int dev_change_carrier(struct net_device *, bool new_carrier);
2381extern int dev_change_carrier(struct net_device *, 2430int dev_get_phys_port_id(struct net_device *dev,
2382 bool new_carrier); 2431 struct netdev_phys_port_id *ppid);
2383extern int dev_get_phys_port_id(struct net_device *dev, 2432int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2384 struct netdev_phys_port_id *ppid); 2433 struct netdev_queue *txq);
2385extern int dev_hard_start_xmit(struct sk_buff *skb, 2434int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2386 struct net_device *dev,
2387 struct netdev_queue *txq);
2388extern int dev_forward_skb(struct net_device *dev,
2389 struct sk_buff *skb);
2390 2435
2391extern int netdev_budget; 2436extern int netdev_budget;
2392 2437
2393/* Called by rtnetlink.c:rtnl_unlock() */ 2438/* Called by rtnetlink.c:rtnl_unlock() */
2394extern void netdev_run_todo(void); 2439void netdev_run_todo(void);
2395 2440
2396/** 2441/**
2397 * dev_put - release reference to device 2442 * dev_put - release reference to device
@@ -2424,9 +2469,9 @@ static inline void dev_hold(struct net_device *dev)
2424 * kind of lower layer not just hardware media. 2469 * kind of lower layer not just hardware media.
2425 */ 2470 */
2426 2471
2427extern void linkwatch_init_dev(struct net_device *dev); 2472void linkwatch_init_dev(struct net_device *dev);
2428extern void linkwatch_fire_event(struct net_device *dev); 2473void linkwatch_fire_event(struct net_device *dev);
2429extern void linkwatch_forget_dev(struct net_device *dev); 2474void linkwatch_forget_dev(struct net_device *dev);
2430 2475
2431/** 2476/**
2432 * netif_carrier_ok - test if carrier present 2477 * netif_carrier_ok - test if carrier present
@@ -2439,13 +2484,13 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
2439 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 2484 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2440} 2485}
2441 2486
2442extern unsigned long dev_trans_start(struct net_device *dev); 2487unsigned long dev_trans_start(struct net_device *dev);
2443 2488
2444extern void __netdev_watchdog_up(struct net_device *dev); 2489void __netdev_watchdog_up(struct net_device *dev);
2445 2490
2446extern void netif_carrier_on(struct net_device *dev); 2491void netif_carrier_on(struct net_device *dev);
2447 2492
2448extern void netif_carrier_off(struct net_device *dev); 2493void netif_carrier_off(struct net_device *dev);
2449 2494
2450/** 2495/**
2451 * netif_dormant_on - mark device as dormant. 2496 * netif_dormant_on - mark device as dormant.
@@ -2513,9 +2558,9 @@ static inline bool netif_device_present(struct net_device *dev)
2513 return test_bit(__LINK_STATE_PRESENT, &dev->state); 2558 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2514} 2559}
2515 2560
2516extern void netif_device_detach(struct net_device *dev); 2561void netif_device_detach(struct net_device *dev);
2517 2562
2518extern void netif_device_attach(struct net_device *dev); 2563void netif_device_attach(struct net_device *dev);
2519 2564
2520/* 2565/*
2521 * Network interface message level settings 2566 * Network interface message level settings
@@ -2724,119 +2769,138 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
2724 2769
2725/* These functions live elsewhere (drivers/net/net_init.c, but related) */ 2770/* These functions live elsewhere (drivers/net/net_init.c, but related) */
2726 2771
2727extern void ether_setup(struct net_device *dev); 2772void ether_setup(struct net_device *dev);
2728 2773
2729/* Support for loadable net-drivers */ 2774/* Support for loadable net-drivers */
2730extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 2775struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2731 void (*setup)(struct net_device *), 2776 void (*setup)(struct net_device *),
2732 unsigned int txqs, unsigned int rxqs); 2777 unsigned int txqs, unsigned int rxqs);
2733#define alloc_netdev(sizeof_priv, name, setup) \ 2778#define alloc_netdev(sizeof_priv, name, setup) \
2734 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) 2779 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2735 2780
2736#define alloc_netdev_mq(sizeof_priv, name, setup, count) \ 2781#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2737 alloc_netdev_mqs(sizeof_priv, name, setup, count, count) 2782 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2738 2783
2739extern int register_netdev(struct net_device *dev); 2784int register_netdev(struct net_device *dev);
2740extern void unregister_netdev(struct net_device *dev); 2785void unregister_netdev(struct net_device *dev);
2741 2786
2742/* General hardware address lists handling functions */ 2787/* General hardware address lists handling functions */
2743extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, 2788int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2744 struct netdev_hw_addr_list *from_list, 2789 struct netdev_hw_addr_list *from_list,
2745 int addr_len, unsigned char addr_type); 2790 int addr_len, unsigned char addr_type);
2746extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, 2791void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2747 struct netdev_hw_addr_list *from_list, 2792 struct netdev_hw_addr_list *from_list,
2748 int addr_len, unsigned char addr_type); 2793 int addr_len, unsigned char addr_type);
2749extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 2794int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2750 struct netdev_hw_addr_list *from_list, 2795 struct netdev_hw_addr_list *from_list, int addr_len);
2751 int addr_len); 2796void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2752extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 2797 struct netdev_hw_addr_list *from_list, int addr_len);
2753 struct netdev_hw_addr_list *from_list, 2798void __hw_addr_flush(struct netdev_hw_addr_list *list);
2754 int addr_len); 2799void __hw_addr_init(struct netdev_hw_addr_list *list);
2755extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2756extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2757 2800
2758/* Functions used for device addresses handling */ 2801/* Functions used for device addresses handling */
2759extern int dev_addr_add(struct net_device *dev, const unsigned char *addr, 2802int dev_addr_add(struct net_device *dev, const unsigned char *addr,
2760 unsigned char addr_type); 2803 unsigned char addr_type);
2761extern int dev_addr_del(struct net_device *dev, const unsigned char *addr, 2804int dev_addr_del(struct net_device *dev, const unsigned char *addr,
2762 unsigned char addr_type); 2805 unsigned char addr_type);
2763extern int dev_addr_add_multiple(struct net_device *to_dev, 2806int dev_addr_add_multiple(struct net_device *to_dev,
2764 struct net_device *from_dev, 2807 struct net_device *from_dev, unsigned char addr_type);
2765 unsigned char addr_type); 2808int dev_addr_del_multiple(struct net_device *to_dev,
2766extern int dev_addr_del_multiple(struct net_device *to_dev, 2809 struct net_device *from_dev, unsigned char addr_type);
2767 struct net_device *from_dev, 2810void dev_addr_flush(struct net_device *dev);
2768 unsigned char addr_type); 2811int dev_addr_init(struct net_device *dev);
2769extern void dev_addr_flush(struct net_device *dev);
2770extern int dev_addr_init(struct net_device *dev);
2771 2812
2772/* Functions used for unicast addresses handling */ 2813/* Functions used for unicast addresses handling */
2773extern int dev_uc_add(struct net_device *dev, const unsigned char *addr); 2814int dev_uc_add(struct net_device *dev, const unsigned char *addr);
2774extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 2815int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
2775extern int dev_uc_del(struct net_device *dev, const unsigned char *addr); 2816int dev_uc_del(struct net_device *dev, const unsigned char *addr);
2776extern int dev_uc_sync(struct net_device *to, struct net_device *from); 2817int dev_uc_sync(struct net_device *to, struct net_device *from);
2777extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 2818int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
2778extern void dev_uc_unsync(struct net_device *to, struct net_device *from); 2819void dev_uc_unsync(struct net_device *to, struct net_device *from);
2779extern void dev_uc_flush(struct net_device *dev); 2820void dev_uc_flush(struct net_device *dev);
2780extern void dev_uc_init(struct net_device *dev); 2821void dev_uc_init(struct net_device *dev);
2781 2822
2782/* Functions used for multicast addresses handling */ 2823/* Functions used for multicast addresses handling */
2783extern int dev_mc_add(struct net_device *dev, const unsigned char *addr); 2824int dev_mc_add(struct net_device *dev, const unsigned char *addr);
2784extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 2825int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
2785extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 2826int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
2786extern int dev_mc_del(struct net_device *dev, const unsigned char *addr); 2827int dev_mc_del(struct net_device *dev, const unsigned char *addr);
2787extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 2828int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
2788extern int dev_mc_sync(struct net_device *to, struct net_device *from); 2829int dev_mc_sync(struct net_device *to, struct net_device *from);
2789extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 2830int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
2790extern void dev_mc_unsync(struct net_device *to, struct net_device *from); 2831void dev_mc_unsync(struct net_device *to, struct net_device *from);
2791extern void dev_mc_flush(struct net_device *dev); 2832void dev_mc_flush(struct net_device *dev);
2792extern void dev_mc_init(struct net_device *dev); 2833void dev_mc_init(struct net_device *dev);
2793 2834
2794/* Functions used for secondary unicast and multicast support */ 2835/* Functions used for secondary unicast and multicast support */
2795extern void dev_set_rx_mode(struct net_device *dev); 2836void dev_set_rx_mode(struct net_device *dev);
2796extern void __dev_set_rx_mode(struct net_device *dev); 2837void __dev_set_rx_mode(struct net_device *dev);
2797extern int dev_set_promiscuity(struct net_device *dev, int inc); 2838int dev_set_promiscuity(struct net_device *dev, int inc);
2798extern int dev_set_allmulti(struct net_device *dev, int inc); 2839int dev_set_allmulti(struct net_device *dev, int inc);
2799extern void netdev_state_change(struct net_device *dev); 2840void netdev_state_change(struct net_device *dev);
2800extern void netdev_notify_peers(struct net_device *dev); 2841void netdev_notify_peers(struct net_device *dev);
2801extern void netdev_features_change(struct net_device *dev); 2842void netdev_features_change(struct net_device *dev);
2802/* Load a device via the kmod */ 2843/* Load a device via the kmod */
2803extern void dev_load(struct net *net, const char *name); 2844void dev_load(struct net *net, const char *name);
2804extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 2845struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2805 struct rtnl_link_stats64 *storage); 2846 struct rtnl_link_stats64 *storage);
2806extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 2847void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2807 const struct net_device_stats *netdev_stats); 2848 const struct net_device_stats *netdev_stats);
2808 2849
2809extern int netdev_max_backlog; 2850extern int netdev_max_backlog;
2810extern int netdev_tstamp_prequeue; 2851extern int netdev_tstamp_prequeue;
2811extern int weight_p; 2852extern int weight_p;
2812extern int bpf_jit_enable; 2853extern int bpf_jit_enable;
2813 2854
2814extern bool netdev_has_upper_dev(struct net_device *dev, 2855bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
2815 struct net_device *upper_dev); 2856bool netdev_has_any_upper_dev(struct net_device *dev);
2816extern bool netdev_has_any_upper_dev(struct net_device *dev); 2857struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
2817extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 2858 struct list_head **iter);
2818 struct list_head **iter);
2819 2859
2820/* iterate through upper list, must be called under RCU read lock */ 2860/* iterate through upper list, must be called under RCU read lock */
2821#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \ 2861#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
2822 for (iter = &(dev)->upper_dev_list, \ 2862 for (iter = &(dev)->all_adj_list.upper, \
2823 upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 2863 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
2824 upper; \ 2864 updev; \
2825 upper = netdev_upper_get_next_dev_rcu(dev, &(iter))) 2865 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
2826 2866
2827extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 2867void *netdev_lower_get_next_private(struct net_device *dev,
2828extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 2868 struct list_head **iter);
2829extern int netdev_upper_dev_link(struct net_device *dev, 2869void *netdev_lower_get_next_private_rcu(struct net_device *dev,
2870 struct list_head **iter);
2871
2872#define netdev_for_each_lower_private(dev, priv, iter) \
2873 for (iter = (dev)->adj_list.lower.next, \
2874 priv = netdev_lower_get_next_private(dev, &(iter)); \
2875 priv; \
2876 priv = netdev_lower_get_next_private(dev, &(iter)))
2877
2878#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
2879 for (iter = &(dev)->adj_list.lower, \
2880 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
2881 priv; \
2882 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
2883
2884void *netdev_adjacent_get_private(struct list_head *adj_list);
2885struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
2886struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
2887int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
2888int netdev_master_upper_dev_link(struct net_device *dev,
2830 struct net_device *upper_dev); 2889 struct net_device *upper_dev);
2831extern int netdev_master_upper_dev_link(struct net_device *dev, 2890int netdev_master_upper_dev_link_private(struct net_device *dev,
2832 struct net_device *upper_dev); 2891 struct net_device *upper_dev,
2833extern void netdev_upper_dev_unlink(struct net_device *dev, 2892 void *private);
2834 struct net_device *upper_dev); 2893void netdev_upper_dev_unlink(struct net_device *dev,
2835extern int skb_checksum_help(struct sk_buff *skb); 2894 struct net_device *upper_dev);
2836extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2895void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
2837 netdev_features_t features, bool tx_path); 2896 struct net_device *lower_dev);
2838extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 2897void *netdev_lower_dev_get_private(struct net_device *dev,
2839 netdev_features_t features); 2898 struct net_device *lower_dev);
2899int skb_checksum_help(struct sk_buff *skb);
2900struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2901 netdev_features_t features, bool tx_path);
2902struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2903 netdev_features_t features);
2840 2904
2841static inline 2905static inline
2842struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) 2906struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
@@ -2858,30 +2922,42 @@ static inline bool can_checksum_protocol(netdev_features_t features,
2858} 2922}
2859 2923
2860#ifdef CONFIG_BUG 2924#ifdef CONFIG_BUG
2861extern void netdev_rx_csum_fault(struct net_device *dev); 2925void netdev_rx_csum_fault(struct net_device *dev);
2862#else 2926#else
2863static inline void netdev_rx_csum_fault(struct net_device *dev) 2927static inline void netdev_rx_csum_fault(struct net_device *dev)
2864{ 2928{
2865} 2929}
2866#endif 2930#endif
2867/* rx skb timestamps */ 2931/* rx skb timestamps */
2868extern void net_enable_timestamp(void); 2932void net_enable_timestamp(void);
2869extern void net_disable_timestamp(void); 2933void net_disable_timestamp(void);
2870 2934
2871#ifdef CONFIG_PROC_FS 2935#ifdef CONFIG_PROC_FS
2872extern int __init dev_proc_init(void); 2936int __init dev_proc_init(void);
2873#else 2937#else
2874#define dev_proc_init() 0 2938#define dev_proc_init() 0
2875#endif 2939#endif
2876 2940
2877extern int netdev_class_create_file(struct class_attribute *class_attr); 2941int netdev_class_create_file_ns(struct class_attribute *class_attr,
2878extern void netdev_class_remove_file(struct class_attribute *class_attr); 2942 const void *ns);
2943void netdev_class_remove_file_ns(struct class_attribute *class_attr,
2944 const void *ns);
2945
2946static inline int netdev_class_create_file(struct class_attribute *class_attr)
2947{
2948 return netdev_class_create_file_ns(class_attr, NULL);
2949}
2950
2951static inline void netdev_class_remove_file(struct class_attribute *class_attr)
2952{
2953 netdev_class_remove_file_ns(class_attr, NULL);
2954}
2879 2955
2880extern struct kobj_ns_type_operations net_ns_type_operations; 2956extern struct kobj_ns_type_operations net_ns_type_operations;
2881 2957
2882extern const char *netdev_drivername(const struct net_device *dev); 2958const char *netdev_drivername(const struct net_device *dev);
2883 2959
2884extern void linkwatch_run_queue(void); 2960void linkwatch_run_queue(void);
2885 2961
2886static inline netdev_features_t netdev_get_wanted_features( 2962static inline netdev_features_t netdev_get_wanted_features(
2887 struct net_device *dev) 2963 struct net_device *dev)
@@ -2945,6 +3021,24 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
2945 dev->gso_max_size = size; 3021 dev->gso_max_size = size;
2946} 3022}
2947 3023
3024static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3025 int pulled_hlen, u16 mac_offset,
3026 int mac_len)
3027{
3028 skb->protocol = protocol;
3029 skb->encapsulation = 1;
3030 skb_push(skb, pulled_hlen);
3031 skb_reset_transport_header(skb);
3032 skb->mac_header = mac_offset;
3033 skb->network_header = skb->mac_header + mac_len;
3034 skb->mac_len = mac_len;
3035}
3036
3037static inline bool netif_is_macvlan(struct net_device *dev)
3038{
3039 return dev->priv_flags & IFF_MACVLAN;
3040}
3041
2948static inline bool netif_is_bond_master(struct net_device *dev) 3042static inline bool netif_is_bond_master(struct net_device *dev)
2949{ 3043{
2950 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 3044 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
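
Illustrative context for skb_gso_error_unwind() above, not part of this patch: a tunnel GSO handler pulls the outer headers before segmenting the inner packet; if inner segmentation fails, the helper pushes the pulled bytes back and restores the offsets so the caller gets back the skb it passed in. All local names are made up; skb_mac_gso_segment() is the helper declared earlier in this file:

    static struct sk_buff *my_tnl_gso_segment(struct sk_buff *skb,
                                              netdev_features_t features)
    {
            __be16 protocol = skb->protocol;
            u16 mac_offset = skb->mac_header;
            int mac_len = skb->mac_len;
            int tnl_hlen = 0;       /* set below to the outer bytes pulled */
            struct sk_buff *segs;

            /* ... pull tnl_hlen bytes of outer header, switch skb->protocol
             * to the inner protocol ... */

            segs = skb_mac_gso_segment(skb, features);
            if (IS_ERR_OR_NULL(segs))
                    skb_gso_error_unwind(skb, protocol, tnl_hlen,
                                         mac_offset, mac_len);
            return segs;
    }
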
@@ -2973,22 +3067,22 @@ static inline const char *netdev_name(const struct net_device *dev)
2973 return dev->name; 3067 return dev->name;
2974} 3068}
2975 3069
2976extern __printf(3, 4) 3070__printf(3, 4)
2977int netdev_printk(const char *level, const struct net_device *dev, 3071int netdev_printk(const char *level, const struct net_device *dev,
2978 const char *format, ...); 3072 const char *format, ...);
2979extern __printf(2, 3) 3073__printf(2, 3)
2980int netdev_emerg(const struct net_device *dev, const char *format, ...); 3074int netdev_emerg(const struct net_device *dev, const char *format, ...);
2981extern __printf(2, 3) 3075__printf(2, 3)
2982int netdev_alert(const struct net_device *dev, const char *format, ...); 3076int netdev_alert(const struct net_device *dev, const char *format, ...);
2983extern __printf(2, 3) 3077__printf(2, 3)
2984int netdev_crit(const struct net_device *dev, const char *format, ...); 3078int netdev_crit(const struct net_device *dev, const char *format, ...);
2985extern __printf(2, 3) 3079__printf(2, 3)
2986int netdev_err(const struct net_device *dev, const char *format, ...); 3080int netdev_err(const struct net_device *dev, const char *format, ...);
2987extern __printf(2, 3) 3081__printf(2, 3)
2988int netdev_warn(const struct net_device *dev, const char *format, ...); 3082int netdev_warn(const struct net_device *dev, const char *format, ...);
2989extern __printf(2, 3) 3083__printf(2, 3)
2990int netdev_notice(const struct net_device *dev, const char *format, ...); 3084int netdev_notice(const struct net_device *dev, const char *format, ...);
2991extern __printf(2, 3) 3085__printf(2, 3)
2992int netdev_info(const struct net_device *dev, const char *format, ...); 3086int netdev_info(const struct net_device *dev, const char *format, ...);
2993 3087
2994#define MODULE_ALIAS_NETDEV(device) \ 3088#define MODULE_ALIAS_NETDEV(device) \
@@ -3029,7 +3123,7 @@ do { \
3029 * file/line information and a backtrace. 3123 * file/line information and a backtrace.
3030 */ 3124 */
3031#define netdev_WARN(dev, format, args...) \ 3125#define netdev_WARN(dev, format, args...) \
3032 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args); 3126 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
3033 3127
3034/* netif printk helpers, similar to netdev_printk */ 3128/* netif printk helpers, similar to netdev_printk */
3035 3129
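
Why the trailing semicolon was dropped from netdev_WARN() above: the old definition already ended in ';', so a call site followed by its own semicolon expanded to an extra empty statement. That is harmless on its own, but it breaks an if/else written the natural way, because the empty statement closes the if-branch and leaves the else unmatched. Hypothetical call site for illustration:

    if (bad_ring_state)
            netdev_WARN(dev, "ring %d corrupt\n", i);   /* old macro: expands to "...;;" */
    else
            reset_ring(dev, i);                         /* error: 'else' without 'if' */
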
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 708fe72ab913..2077489f9887 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -35,14 +35,15 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
35 result->all[3] = a1->all[3] & mask->all[3]; 35 result->all[3] = a1->all[3] & mask->all[3];
36} 36}
37 37
38extern int netfilter_init(void); 38int netfilter_init(void);
39 39
40/* Largest hook number + 1 */ 40/* Largest hook number + 1 */
41#define NF_MAX_HOOKS 8 41#define NF_MAX_HOOKS 8
42 42
43struct sk_buff; 43struct sk_buff;
44 44
45typedef unsigned int nf_hookfn(unsigned int hooknum, 45struct nf_hook_ops;
46typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
46 struct sk_buff *skb, 47 struct sk_buff *skb,
47 const struct net_device *in, 48 const struct net_device *in,
48 const struct net_device *out, 49 const struct net_device *out,
@@ -52,12 +53,13 @@ struct nf_hook_ops {
52 struct list_head list; 53 struct list_head list;
53 54
54 /* User fills in from here down. */ 55 /* User fills in from here down. */
55 nf_hookfn *hook; 56 nf_hookfn *hook;
56 struct module *owner; 57 struct module *owner;
57 u_int8_t pf; 58 void *priv;
58 unsigned int hooknum; 59 u_int8_t pf;
60 unsigned int hooknum;
59 /* Hooks are ordered in ascending priority. */ 61 /* Hooks are ordered in ascending priority. */
60 int priority; 62 int priority;
61}; 63};
62 64
63struct nf_sockopt_ops { 65struct nf_sockopt_ops {
@@ -208,7 +210,7 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
208/* Call this before modifying an existing packet: ensures it is 210/* Call this before modifying an existing packet: ensures it is
209 modifiable and linear to the point you care about (writable_len). 211 modifiable and linear to the point you care about (writable_len).
210 Returns true or false. */ 212 Returns true or false. */
211extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); 213int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
212 214
213struct flowi; 215struct flowi;
214struct nf_queue_entry; 216struct nf_queue_entry;
@@ -269,8 +271,8 @@ nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
269 return csum; 271 return csum;
270} 272}
271 273
272extern int nf_register_afinfo(const struct nf_afinfo *afinfo); 274int nf_register_afinfo(const struct nf_afinfo *afinfo);
273extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); 275void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
274 276
275#include <net/flow.h> 277#include <net/flow.h>
276extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); 278extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
@@ -315,7 +317,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
315 317
316#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 318#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
317extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; 319extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
318extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *); 320void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
319extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; 321extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
320 322
321struct nf_conn; 323struct nf_conn;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 9ac9fbde7b61..c7174b816674 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -49,31 +49,68 @@ enum ip_set_feature {
49 49
50/* Set extensions */ 50/* Set extensions */
51enum ip_set_extension { 51enum ip_set_extension {
52 IPSET_EXT_NONE = 0, 52 IPSET_EXT_BIT_TIMEOUT = 0,
53 IPSET_EXT_BIT_TIMEOUT = 1,
54 IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT), 53 IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
55 IPSET_EXT_BIT_COUNTER = 2, 54 IPSET_EXT_BIT_COUNTER = 1,
56 IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER), 55 IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
57}; 56 IPSET_EXT_BIT_COMMENT = 2,
58 57 IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
59/* Extension offsets */ 58 /* Mark set with an extension which needs to call destroy */
60enum ip_set_offset { 59 IPSET_EXT_BIT_DESTROY = 7,
61 IPSET_OFFSET_TIMEOUT = 0, 60 IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
62 IPSET_OFFSET_COUNTER,
63 IPSET_OFFSET_MAX,
64}; 61};
65 62
66#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) 63#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
67#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER) 64#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
65#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
66
67/* Extension id, in size order */
68enum ip_set_ext_id {
69 IPSET_EXT_ID_COUNTER = 0,
70 IPSET_EXT_ID_TIMEOUT,
71 IPSET_EXT_ID_COMMENT,
72 IPSET_EXT_ID_MAX,
73};
74
75/* Extension type */
76struct ip_set_ext_type {
77 /* Destroy extension private data (can be NULL) */
78 void (*destroy)(void *ext);
79 enum ip_set_extension type;
80 enum ipset_cadt_flags flag;
81 /* Size and minimal alignment */
82 u8 len;
83 u8 align;
84};
85
86extern const struct ip_set_ext_type ip_set_extensions[];
68 87
69struct ip_set_ext { 88struct ip_set_ext {
70 unsigned long timeout;
71 u64 packets; 89 u64 packets;
72 u64 bytes; 90 u64 bytes;
91 u32 timeout;
92 char *comment;
93};
94
95struct ip_set_counter {
96 atomic64_t bytes;
97 atomic64_t packets;
98};
99
100struct ip_set_comment {
101 char *str;
73}; 102};
74 103
75struct ip_set; 104struct ip_set;
76 105
106#define ext_timeout(e, s) \
107(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
108#define ext_counter(e, s) \
109(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
110#define ext_comment(e, s) \
111(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
112
113
77typedef int (*ipset_adtfn)(struct ip_set *set, void *value, 114typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
78 const struct ip_set_ext *ext, 115 const struct ip_set_ext *ext,
79 struct ip_set_ext *mext, u32 cmdflags); 116 struct ip_set_ext *mext, u32 cmdflags);
@@ -147,7 +184,8 @@ struct ip_set_type {
147 u8 revision_min, revision_max; 184 u8 revision_min, revision_max;
148 185
149 /* Create set */ 186 /* Create set */
150 int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags); 187 int (*create)(struct net *net, struct ip_set *set,
188 struct nlattr *tb[], u32 flags);
151 189
152 /* Attribute policies */ 190 /* Attribute policies */
153 const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1]; 191 const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
@@ -179,14 +217,45 @@ struct ip_set {
179 u8 revision; 217 u8 revision;
180 /* Extensions */ 218 /* Extensions */
181 u8 extensions; 219 u8 extensions;
220 /* Default timeout value, if enabled */
221 u32 timeout;
222 /* Element data size */
223 size_t dsize;
224 /* Offsets to extensions in elements */
225 size_t offset[IPSET_EXT_ID_MAX];
182 /* The type specific data */ 226 /* The type specific data */
183 void *data; 227 void *data;
184}; 228};
185 229
186struct ip_set_counter { 230static inline void
187 atomic64_t bytes; 231ip_set_ext_destroy(struct ip_set *set, void *data)
188 atomic64_t packets; 232{
189}; 233 /* Check that the extension is enabled for the set and
234 * call it's destroy function for its extension part in data.
235 */
236 if (SET_WITH_COMMENT(set))
237 ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
238 ext_comment(data, set));
239}
240
241static inline int
242ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
243{
244 u32 cadt_flags = 0;
245
246 if (SET_WITH_TIMEOUT(set))
247 if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
248 htonl(set->timeout))))
249 return -EMSGSIZE;
250 if (SET_WITH_COUNTER(set))
251 cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
252 if (SET_WITH_COMMENT(set))
253 cadt_flags |= IPSET_FLAG_WITH_COMMENT;
254
255 if (!cadt_flags)
256 return 0;
257 return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
258}
190 259
191static inline void 260static inline void
192ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) 261ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
@@ -247,13 +316,24 @@ ip_set_init_counter(struct ip_set_counter *counter,
247 atomic64_set(&(counter)->packets, (long long)(ext->packets)); 316 atomic64_set(&(counter)->packets, (long long)(ext->packets));
248} 317}
249 318
319/* Netlink CB args */
320enum {
321 IPSET_CB_NET = 0,
322 IPSET_CB_DUMP,
323 IPSET_CB_INDEX,
324 IPSET_CB_ARG0,
325 IPSET_CB_ARG1,
326 IPSET_CB_ARG2,
327};
328
250/* register and unregister set references */ 329/* register and unregister set references */
251extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set); 330extern ip_set_id_t ip_set_get_byname(struct net *net,
252extern void ip_set_put_byindex(ip_set_id_t index); 331 const char *name, struct ip_set **set);
253extern const char *ip_set_name_byindex(ip_set_id_t index); 332extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
254extern ip_set_id_t ip_set_nfnl_get(const char *name); 333extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
255extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index); 334extern ip_set_id_t ip_set_nfnl_get(struct net *net, const char *name);
256extern void ip_set_nfnl_put(ip_set_id_t index); 335extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
336extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
257 337
258/* API for iptables set match, and SET target */ 338/* API for iptables set match, and SET target */
259 339
@@ -272,6 +352,8 @@ extern void *ip_set_alloc(size_t size);
272extern void ip_set_free(void *members); 352extern void ip_set_free(void *members);
273extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); 353extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
274extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); 354extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
355extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
356 size_t len);
275extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], 357extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
276 struct ip_set_ext *ext); 358 struct ip_set_ext *ext);
277 359
@@ -389,13 +471,40 @@ bitmap_bytes(u32 a, u32 b)
389} 471}
390 472
391#include <linux/netfilter/ipset/ip_set_timeout.h> 473#include <linux/netfilter/ipset/ip_set_timeout.h>
474#include <linux/netfilter/ipset/ip_set_comment.h>
475
476static inline int
477ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
478 const void *e, bool active)
479{
480 if (SET_WITH_TIMEOUT(set)) {
481 unsigned long *timeout = ext_timeout(e, set);
482
483 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
484 htonl(active ? ip_set_timeout_get(timeout)
485 : *timeout)))
486 return -EMSGSIZE;
487 }
488 if (SET_WITH_COUNTER(set) &&
489 ip_set_put_counter(skb, ext_counter(e, set)))
490 return -EMSGSIZE;
491 if (SET_WITH_COMMENT(set) &&
492 ip_set_put_comment(skb, ext_comment(e, set)))
493 return -EMSGSIZE;
494 return 0;
495}
392 496
393#define IP_SET_INIT_KEXT(skb, opt, map) \ 497#define IP_SET_INIT_KEXT(skb, opt, set) \
394 { .bytes = (skb)->len, .packets = 1, \ 498 { .bytes = (skb)->len, .packets = 1, \
395 .timeout = ip_set_adt_opt_timeout(opt, map) } 499 .timeout = ip_set_adt_opt_timeout(opt, set) }
396 500
397#define IP_SET_INIT_UEXT(map) \ 501#define IP_SET_INIT_UEXT(set) \
398 { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \ 502 { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
399 .timeout = (map)->timeout } 503 .timeout = (set)->timeout }
504
505#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
506
507#define IPSET_CONCAT(a, b) a##b
508#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
400 509
401#endif /*_IP_SET_H */ 510#endif /*_IP_SET_H */
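
The reworked ip_set.h above moves per-element data behind precomputed offsets (set->offset[], sized via ip_set_elem_len()) and adds header-level reporting through ip_set_put_flags() plus per-element reporting through ip_set_put_extensions(). A short sketch of how a set type's dump path might use the two helpers follows; the example_* names are placeholders, only the called helpers come from the patch.

/* Emit the set's create-time options (timeout/counter/comment flags). */
static int example_dump_header(struct sk_buff *skb, struct ip_set *set)
{
        return ip_set_put_flags(skb, set);
}

/* Emit one element's extension attributes while listing the set. */
static int example_dump_elem(struct sk_buff *skb, const struct ip_set *set,
                             const void *elem)
{
        return ip_set_put_extensions(skb, set, elem, true);
}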
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
new file mode 100644
index 000000000000..21217ea008d7
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -0,0 +1,57 @@
1#ifndef _IP_SET_COMMENT_H
2#define _IP_SET_COMMENT_H
3
4/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifdef __KERNEL__
12
13static inline char*
14ip_set_comment_uget(struct nlattr *tb)
15{
16 return nla_data(tb);
17}
18
19static inline void
20ip_set_init_comment(struct ip_set_comment *comment,
21 const struct ip_set_ext *ext)
22{
23 size_t len = ext->comment ? strlen(ext->comment) : 0;
24
25 if (unlikely(comment->str)) {
26 kfree(comment->str);
27 comment->str = NULL;
28 }
29 if (!len)
30 return;
31 if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
32 len = IPSET_MAX_COMMENT_SIZE;
33 comment->str = kzalloc(len + 1, GFP_ATOMIC);
34 if (unlikely(!comment->str))
35 return;
36 strlcpy(comment->str, ext->comment, len + 1);
37}
38
39static inline int
40ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
41{
42 if (!comment->str)
43 return 0;
44 return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
45}
46
47static inline void
48ip_set_comment_free(struct ip_set_comment *comment)
49{
50 if (unlikely(!comment->str))
51 return;
52 kfree(comment->str);
53 comment->str = NULL;
54}
55
56#endif
57#endif
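
The new ip_set_comment.h above defines the whole comment lifecycle inline. A sketch of how a set type is expected to use it; the example_* wrappers are illustrative, while the GFP_ATOMIC copy and the kfree behaviour are taken from the header itself.

/* Userspace add: point ext->comment at the netlink attribute payload. */
static void example_parse_comment(struct nlattr *tb[], struct ip_set_ext *ext)
{
        if (tb[IPSET_ATTR_COMMENT])
                ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
}

/* Store the comment in the element (kzalloc + strlcpy, GFP_ATOMIC). */
static void example_add_elem_comment(struct ip_set_comment *c,
                                     const struct ip_set_ext *ext)
{
        ip_set_init_comment(c, ext);
}

/* Release it when the element goes away. */
static void example_del_elem_comment(struct ip_set_comment *c)
{
        ip_set_comment_free(c);
}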
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 3aac04167ca7..83c2f9e0886c 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -23,8 +23,8 @@
23/* Set is defined with timeout support: timeout value may be 0 */ 23/* Set is defined with timeout support: timeout value may be 0 */
24#define IPSET_NO_TIMEOUT UINT_MAX 24#define IPSET_NO_TIMEOUT UINT_MAX
25 25
26#define ip_set_adt_opt_timeout(opt, map) \ 26#define ip_set_adt_opt_timeout(opt, set) \
27((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout) 27((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
28 28
29static inline unsigned int 29static inline unsigned int
30ip_set_timeout_uget(struct nlattr *tb) 30ip_set_timeout_uget(struct nlattr *tb)
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 127d0b90604f..275505792664 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -23,6 +23,6 @@ struct ip_conntrack_stat {
23}; 23};
24 24
25/* call to create an explicit dependency on nf_conntrack. */ 25/* call to create an explicit dependency on nf_conntrack. */
26extern void need_conntrack(void); 26void need_conntrack(void);
27 27
28#endif /* _NF_CONNTRACK_COMMON_H */ 28#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index f381020eee92..858d9b214053 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -29,13 +29,13 @@ struct nf_ct_h323_master {
29 29
30struct nf_conn; 30struct nf_conn;
31 31
32extern int get_h225_addr(struct nf_conn *ct, unsigned char *data, 32int get_h225_addr(struct nf_conn *ct, unsigned char *data,
33 TransportAddress *taddr, 33 TransportAddress *taddr, union nf_inet_addr *addr,
34 union nf_inet_addr *addr, __be16 *port); 34 __be16 *port);
35extern void nf_conntrack_h245_expect(struct nf_conn *new, 35void nf_conntrack_h245_expect(struct nf_conn *new,
36 struct nf_conntrack_expect *this); 36 struct nf_conntrack_expect *this);
37extern void nf_conntrack_q931_expect(struct nf_conn *new, 37void nf_conntrack_q931_expect(struct nf_conn *new,
38 struct nf_conntrack_expect *this); 38 struct nf_conntrack_expect *this);
39extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff, 39extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
40 unsigned char **data, int dataoff, 40 unsigned char **data, int dataoff,
41 H245_TransportAddress *taddr, 41 H245_TransportAddress *taddr,
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 6a0664c0c451..ec2ffaf418c8 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -87,8 +87,8 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
87/* delete keymap entries */ 87/* delete keymap entries */
88void nf_ct_gre_keymap_destroy(struct nf_conn *ct); 88void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
89 89
90extern void nf_ct_gre_keymap_flush(struct net *net); 90void nf_ct_gre_keymap_flush(struct net *net);
91extern void nf_nat_need_gre(void); 91void nf_nat_need_gre(void);
92 92
93#endif /* __KERNEL__ */ 93#endif /* __KERNEL__ */
94#endif /* _CONNTRACK_PROTO_GRE_H */ 94#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index ba7f571a2b1c..d5af3c27fb7d 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -107,85 +107,93 @@ enum sdp_header_types {
107 SDP_HDR_MEDIA, 107 SDP_HDR_MEDIA,
108}; 108};
109 109
110extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, 110struct nf_nat_sip_hooks {
111 unsigned int protoff, 111 unsigned int (*msg)(struct sk_buff *skb,
112 unsigned int dataoff, 112 unsigned int protoff,
113 const char **dptr, 113 unsigned int dataoff,
114 unsigned int *datalen); 114 const char **dptr,
115extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, 115 unsigned int *datalen);
116 unsigned int protoff, s16 off); 116
117extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, 117 void (*seq_adjust)(struct sk_buff *skb,
118 unsigned int protoff, 118 unsigned int protoff, s16 off);
119 unsigned int dataoff, 119
120 const char **dptr, 120 unsigned int (*expect)(struct sk_buff *skb,
121 unsigned int *datalen, 121 unsigned int protoff,
122 struct nf_conntrack_expect *exp, 122 unsigned int dataoff,
123 unsigned int matchoff, 123 const char **dptr,
124 unsigned int matchlen); 124 unsigned int *datalen,
125extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, 125 struct nf_conntrack_expect *exp,
126 unsigned int protoff, 126 unsigned int matchoff,
127 unsigned int dataoff, 127 unsigned int matchlen);
128 const char **dptr, 128
129 unsigned int *datalen, 129 unsigned int (*sdp_addr)(struct sk_buff *skb,
130 unsigned int sdpoff, 130 unsigned int protoff,
131 enum sdp_header_types type, 131 unsigned int dataoff,
132 enum sdp_header_types term, 132 const char **dptr,
133 const union nf_inet_addr *addr); 133 unsigned int *datalen,
134extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, 134 unsigned int sdpoff,
135 unsigned int protoff,
136 unsigned int dataoff,
137 const char **dptr,
138 unsigned int *datalen,
139 unsigned int matchoff,
140 unsigned int matchlen,
141 u_int16_t port);
142extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
143 unsigned int protoff,
144 unsigned int dataoff,
145 const char **dptr,
146 unsigned int *datalen,
147 unsigned int sdpoff,
148 const union nf_inet_addr *addr);
149extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
150 unsigned int protoff,
151 unsigned int dataoff,
152 const char **dptr,
153 unsigned int *datalen,
154 struct nf_conntrack_expect *rtp_exp,
155 struct nf_conntrack_expect *rtcp_exp,
156 unsigned int mediaoff,
157 unsigned int medialen,
158 union nf_inet_addr *rtp_addr);
159
160extern int ct_sip_parse_request(const struct nf_conn *ct,
161 const char *dptr, unsigned int datalen,
162 unsigned int *matchoff, unsigned int *matchlen,
163 union nf_inet_addr *addr, __be16 *port);
164extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
165 unsigned int dataoff, unsigned int datalen,
166 enum sip_header_types type,
167 unsigned int *matchoff, unsigned int *matchlen);
168extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
169 unsigned int *dataoff, unsigned int datalen,
170 enum sip_header_types type, int *in_header,
171 unsigned int *matchoff, unsigned int *matchlen,
172 union nf_inet_addr *addr, __be16 *port);
173extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
174 unsigned int dataoff, unsigned int datalen,
175 const char *name,
176 unsigned int *matchoff, unsigned int *matchlen,
177 union nf_inet_addr *addr, bool delim);
178extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
179 unsigned int off, unsigned int datalen,
180 const char *name,
181 unsigned int *matchoff, unsigned int *matchen,
182 unsigned int *val);
183
184extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
185 unsigned int dataoff, unsigned int datalen,
186 enum sdp_header_types type, 135 enum sdp_header_types type,
187 enum sdp_header_types term, 136 enum sdp_header_types term,
188 unsigned int *matchoff, unsigned int *matchlen); 137 const union nf_inet_addr *addr);
138
139 unsigned int (*sdp_port)(struct sk_buff *skb,
140 unsigned int protoff,
141 unsigned int dataoff,
142 const char **dptr,
143 unsigned int *datalen,
144 unsigned int matchoff,
145 unsigned int matchlen,
146 u_int16_t port);
147
148 unsigned int (*sdp_session)(struct sk_buff *skb,
149 unsigned int protoff,
150 unsigned int dataoff,
151 const char **dptr,
152 unsigned int *datalen,
153 unsigned int sdpoff,
154 const union nf_inet_addr *addr);
155
156 unsigned int (*sdp_media)(struct sk_buff *skb,
157 unsigned int protoff,
158 unsigned int dataoff,
159 const char **dptr,
160 unsigned int *datalen,
161 struct nf_conntrack_expect *rtp_exp,
162 struct nf_conntrack_expect *rtcp_exp,
163 unsigned int mediaoff,
164 unsigned int medialen,
165 union nf_inet_addr *rtp_addr);
166};
167extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
168
169int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
170 unsigned int datalen, unsigned int *matchoff,
171 unsigned int *matchlen, union nf_inet_addr *addr,
172 __be16 *port);
173int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
174 unsigned int dataoff, unsigned int datalen,
175 enum sip_header_types type, unsigned int *matchoff,
176 unsigned int *matchlen);
177int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
178 unsigned int *dataoff, unsigned int datalen,
179 enum sip_header_types type, int *in_header,
180 unsigned int *matchoff, unsigned int *matchlen,
181 union nf_inet_addr *addr, __be16 *port);
182int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
183 unsigned int dataoff, unsigned int datalen,
184 const char *name, unsigned int *matchoff,
185 unsigned int *matchlen, union nf_inet_addr *addr,
186 bool delim);
187int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
188 unsigned int off, unsigned int datalen,
189 const char *name, unsigned int *matchoff,
190 unsigned int *matchen, unsigned int *val);
191
192int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
193 unsigned int dataoff, unsigned int datalen,
194 enum sdp_header_types type,
195 enum sdp_header_types term,
196 unsigned int *matchoff, unsigned int *matchlen);
189 197
190#endif /* __KERNEL__ */ 198#endif /* __KERNEL__ */
191#endif /* __NF_CONNTRACK_SIP_H__ */ 199#endif /* __NF_CONNTRACK_SIP_H__ */
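
The SIP hunk above folds seven separate NAT function-pointer hooks into a single struct nf_nat_sip_hooks published through one pointer. A sketch of the consumer side under that model; example_process and the NF_ACCEPT fallback are illustrative, only the struct, the nf_nat_sip_hooks pointer, and the msg signature come from the patch.

static unsigned int example_process(struct sk_buff *skb, unsigned int protoff,
                                    unsigned int dataoff, const char **dptr,
                                    unsigned int *datalen)
{
        const struct nf_nat_sip_hooks *hooks;

        /* One RCU dereference instead of one per hook pointer. */
        hooks = rcu_dereference(nf_nat_sip_hooks);
        if (hooks)
                return hooks->msg(skb, protoff, dataoff, dptr, datalen);
        return NF_ACCEPT;
}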
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index cadb7402d7a7..28c74367e900 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,6 +14,9 @@ struct nfnl_callback {
14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb, 14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
15 const struct nlmsghdr *nlh, 15 const struct nlmsghdr *nlh,
16 const struct nlattr * const cda[]); 16 const struct nlattr * const cda[]);
17 int (*call_batch)(struct sock *nl, struct sk_buff *skb,
18 const struct nlmsghdr *nlh,
19 const struct nlattr * const cda[]);
17 const struct nla_policy *policy; /* netlink attribute policy */ 20 const struct nla_policy *policy; /* netlink attribute policy */
18 const u_int16_t attr_count; /* number of nlattr's */ 21 const u_int16_t attr_count; /* number of nlattr's */
19}; 22};
@@ -23,22 +26,24 @@ struct nfnetlink_subsystem {
23 __u8 subsys_id; /* nfnetlink subsystem ID */ 26 __u8 subsys_id; /* nfnetlink subsystem ID */
24 __u8 cb_count; /* number of callbacks */ 27 __u8 cb_count; /* number of callbacks */
25 const struct nfnl_callback *cb; /* callback for individual types */ 28 const struct nfnl_callback *cb; /* callback for individual types */
29 int (*commit)(struct sk_buff *skb);
30 int (*abort)(struct sk_buff *skb);
26}; 31};
27 32
28extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); 33int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
29extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); 34int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
30 35
31extern int nfnetlink_has_listeners(struct net *net, unsigned int group); 36int nfnetlink_has_listeners(struct net *net, unsigned int group);
32extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size, 37struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
33 u32 dst_portid, gfp_t gfp_mask); 38 u32 dst_portid, gfp_t gfp_mask);
34extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, 39int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
35 unsigned int group, int echo, gfp_t flags); 40 unsigned int group, int echo, gfp_t flags);
36extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); 41int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
37extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, 42int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
38 u32 portid, int flags); 43 int flags);
39 44
40extern void nfnl_lock(__u8 subsys_id); 45void nfnl_lock(__u8 subsys_id);
41extern void nfnl_unlock(__u8 subsys_id); 46void nfnl_unlock(__u8 subsys_id);
42 47
43#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ 48#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
44 MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) 49 MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
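
nfnetlink.h above gains a per-message call_batch handler plus per-subsystem commit/abort callbacks, i.e. transactional batches. A rough sketch of a subsystem opting in; every example_* symbol is a placeholder and NFNL_SUBSYS_NONE merely stands in for a real subsystem id.

static int example_new_obj(struct sock *nl, struct sk_buff *skb,
                           const struct nlmsghdr *nlh,
                           const struct nlattr * const cda[]);
static int example_commit(struct sk_buff *skb);
static int example_abort(struct sk_buff *skb);

static const struct nfnl_callback example_cb[] = {
        [0] = { .call_batch = example_new_obj },  /* handled inside a batch */
};

static const struct nfnetlink_subsystem example_subsys = {
        .name      = "example",
        .subsys_id = NFNL_SUBSYS_NONE,            /* placeholder id */
        .cb_count  = ARRAY_SIZE(example_cb),
        .cb        = example_cb,
        .commit    = example_commit,              /* apply the queued batch */
        .abort     = example_abort,               /* or roll it back */
};

Registration itself is unchanged: the subsystem is still handed to nfnetlink_subsys_register().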
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index bb4bbc9b7a18..b2e85e59f760 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -6,8 +6,8 @@
6 6
7struct nf_acct; 7struct nf_acct;
8 8
9extern struct nf_acct *nfnl_acct_find_get(const char *filter_name); 9struct nf_acct *nfnl_acct_find_get(const char *filter_name);
10extern void nfnl_acct_put(struct nf_acct *acct); 10void nfnl_acct_put(struct nf_acct *acct);
11extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); 11void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
12 12
13#endif /* _NFNL_ACCT_H */ 13#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dd49566315c6..a3e215bb0241 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -229,50 +229,48 @@ struct xt_table_info {
229 229
230#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ 230#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
231 + nr_cpu_ids * sizeof(char *)) 231 + nr_cpu_ids * sizeof(char *))
232extern int xt_register_target(struct xt_target *target); 232int xt_register_target(struct xt_target *target);
233extern void xt_unregister_target(struct xt_target *target); 233void xt_unregister_target(struct xt_target *target);
234extern int xt_register_targets(struct xt_target *target, unsigned int n); 234int xt_register_targets(struct xt_target *target, unsigned int n);
235extern void xt_unregister_targets(struct xt_target *target, unsigned int n); 235void xt_unregister_targets(struct xt_target *target, unsigned int n);
236 236
237extern int xt_register_match(struct xt_match *target); 237int xt_register_match(struct xt_match *target);
238extern void xt_unregister_match(struct xt_match *target); 238void xt_unregister_match(struct xt_match *target);
239extern int xt_register_matches(struct xt_match *match, unsigned int n); 239int xt_register_matches(struct xt_match *match, unsigned int n);
240extern void xt_unregister_matches(struct xt_match *match, unsigned int n); 240void xt_unregister_matches(struct xt_match *match, unsigned int n);
241 241
242extern int xt_check_match(struct xt_mtchk_param *, 242int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
243 unsigned int size, u_int8_t proto, bool inv_proto); 243 bool inv_proto);
244extern int xt_check_target(struct xt_tgchk_param *, 244int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
245 unsigned int size, u_int8_t proto, bool inv_proto); 245 bool inv_proto);
246 246
247extern struct xt_table *xt_register_table(struct net *net, 247struct xt_table *xt_register_table(struct net *net,
248 const struct xt_table *table, 248 const struct xt_table *table,
249 struct xt_table_info *bootstrap, 249 struct xt_table_info *bootstrap,
250 struct xt_table_info *newinfo); 250 struct xt_table_info *newinfo);
251extern void *xt_unregister_table(struct xt_table *table); 251void *xt_unregister_table(struct xt_table *table);
252 252
253extern struct xt_table_info *xt_replace_table(struct xt_table *table, 253struct xt_table_info *xt_replace_table(struct xt_table *table,
254 unsigned int num_counters, 254 unsigned int num_counters,
255 struct xt_table_info *newinfo, 255 struct xt_table_info *newinfo,
256 int *error); 256 int *error);
257 257
258extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); 258struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
259extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); 259struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
260extern struct xt_match *xt_request_find_match(u8 af, const char *name, 260struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
261 u8 revision); 261struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
262extern struct xt_target *xt_request_find_target(u8 af, const char *name, 262int xt_find_revision(u8 af, const char *name, u8 revision, int target,
263 u8 revision); 263 int *err);
264extern int xt_find_revision(u8 af, const char *name, u8 revision, 264
265 int target, int *err); 265struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
266 266 const char *name);
267extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, 267void xt_table_unlock(struct xt_table *t);
268 const char *name); 268
269extern void xt_table_unlock(struct xt_table *t); 269int xt_proto_init(struct net *net, u_int8_t af);
270 270void xt_proto_fini(struct net *net, u_int8_t af);
271extern int xt_proto_init(struct net *net, u_int8_t af); 271
272extern void xt_proto_fini(struct net *net, u_int8_t af); 272struct xt_table_info *xt_alloc_table_info(unsigned int size);
273 273void xt_free_table_info(struct xt_table_info *info);
274extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
275extern void xt_free_table_info(struct xt_table_info *info);
276 274
277/** 275/**
278 * xt_recseq - recursive seqcount for netfilter use 276 * xt_recseq - recursive seqcount for netfilter use
@@ -353,8 +351,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
353 return ret; 351 return ret;
354} 352}
355 353
356extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); 354struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
357extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); 355void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
358 356
359#ifdef CONFIG_COMPAT 357#ifdef CONFIG_COMPAT
360#include <net/compat.h> 358#include <net/compat.h>
@@ -414,25 +412,25 @@ struct _compat_xt_align {
414 412
415#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align)) 413#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
416 414
417extern void xt_compat_lock(u_int8_t af); 415void xt_compat_lock(u_int8_t af);
418extern void xt_compat_unlock(u_int8_t af); 416void xt_compat_unlock(u_int8_t af);
419 417
420extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta); 418int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
421extern void xt_compat_flush_offsets(u_int8_t af); 419void xt_compat_flush_offsets(u_int8_t af);
422extern void xt_compat_init_offsets(u_int8_t af, unsigned int number); 420void xt_compat_init_offsets(u_int8_t af, unsigned int number);
423extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset); 421int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
424 422
425extern int xt_compat_match_offset(const struct xt_match *match); 423int xt_compat_match_offset(const struct xt_match *match);
426extern int xt_compat_match_from_user(struct xt_entry_match *m, 424int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
427 void **dstptr, unsigned int *size); 425 unsigned int *size);
428extern int xt_compat_match_to_user(const struct xt_entry_match *m, 426int xt_compat_match_to_user(const struct xt_entry_match *m,
429 void __user **dstptr, unsigned int *size); 427 void __user **dstptr, unsigned int *size);
430 428
431extern int xt_compat_target_offset(const struct xt_target *target); 429int xt_compat_target_offset(const struct xt_target *target);
432extern void xt_compat_target_from_user(struct xt_entry_target *t, 430void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
433 void **dstptr, unsigned int *size); 431 unsigned int *size);
434extern int xt_compat_target_to_user(const struct xt_entry_target *t, 432int xt_compat_target_to_user(const struct xt_entry_target *t,
435 void __user **dstptr, unsigned int *size); 433 void __user **dstptr, unsigned int *size);
436 434
437#endif /* CONFIG_COMPAT */ 435#endif /* CONFIG_COMPAT */
438#endif /* _X_TABLES_H */ 436#endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index dfb4d9e52bcb..8ab1c278b66d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -25,7 +25,7 @@ enum nf_br_hook_priorities {
25#define BRNF_PPPoE 0x20 25#define BRNF_PPPoE 0x20
26 26
27/* Only used in br_forward.c */ 27/* Only used in br_forward.c */
28extern int nf_bridge_copy_header(struct sk_buff *skb); 28int nf_bridge_copy_header(struct sk_buff *skb);
29static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) 29static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
30{ 30{
31 if (skb->nf_bridge && 31 if (skb->nf_bridge &&
@@ -53,7 +53,7 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
53 return 0; 53 return 0;
54} 54}
55 55
56extern int br_handle_frame_finish(struct sk_buff *skb); 56int br_handle_frame_finish(struct sk_buff *skb);
57/* Only used in br_device.c */ 57/* Only used in br_device.c */
58static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) 58static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
59{ 59{
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index dfaf116b3e81..6e4591bb54d4 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,7 @@
6 6
7#include <uapi/linux/netfilter_ipv4.h> 7#include <uapi/linux/netfilter_ipv4.h>
8 8
9extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type); 9int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
10extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, 10__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
11 unsigned int dataoff, u_int8_t protocol); 11 unsigned int dataoff, u_int8_t protocol);
12#endif /*__LINUX_IP_NETFILTER_H*/ 12#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 2d4df6ce043e..64dad1cc1a4b 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -11,12 +11,12 @@
11 11
12 12
13#ifdef CONFIG_NETFILTER 13#ifdef CONFIG_NETFILTER
14extern int ip6_route_me_harder(struct sk_buff *skb); 14int ip6_route_me_harder(struct sk_buff *skb);
15extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, 15__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
16 unsigned int dataoff, u_int8_t protocol); 16 unsigned int dataoff, u_int8_t protocol);
17 17
18extern int ipv6_netfilter_init(void); 18int ipv6_netfilter_init(void);
19extern void ipv6_netfilter_fini(void); 19void ipv6_netfilter_fini(void);
20 20
21/* 21/*
22 * Hook functions for ipv6 to allow xt_* modules to be built-in even 22 * Hook functions for ipv6 to allow xt_* modules to be built-in even
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index f3c7c24bec1c..fbfdb9d8d3a7 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -24,7 +24,8 @@ struct netpoll {
24 struct net_device *dev; 24 struct net_device *dev;
25 char dev_name[IFNAMSIZ]; 25 char dev_name[IFNAMSIZ];
26 const char *name; 26 const char *name;
27 void (*rx_hook)(struct netpoll *, int, char *, int); 27 void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
28 int offset, int len);
28 29
29 union inet_addr local_ip, remote_ip; 30 union inet_addr local_ip, remote_ip;
30 bool ipv6; 31 bool ipv6;
@@ -41,7 +42,7 @@ struct netpoll_info {
41 unsigned long rx_flags; 42 unsigned long rx_flags;
42 spinlock_t rx_lock; 43 spinlock_t rx_lock;
43 struct semaphore dev_lock; 44 struct semaphore dev_lock;
44 struct list_head rx_np; /* netpolls that registered an rx_hook */ 45 struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
45 46
46 struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */ 47 struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
47 struct sk_buff_head txq; 48 struct sk_buff_head txq;
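
The netpoll hunk above replaces the old rx_hook (which received a copied character buffer) with rx_skb_hook, which hands the callback the original sk_buff plus an offset/length pair. A sketch of a client under the new signature; the field values and the reading of offset/len as "payload location inside skb->data" are assumptions for illustration.

static void example_rx_skb_hook(struct netpoll *np, int source,
                                struct sk_buff *skb, int offset, int len)
{
        /* offset/len are assumed to delimit the UDP payload within skb->data */
        pr_info("netpoll rx (src %d): %d bytes at offset %d\n",
                source, len, offset);
}

static struct netpoll example_np = {
        .name        = "example",
        .dev_name    = "eth0",
        .rx_skb_hook = example_rx_skb_hook,
};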
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e36dee52f224..12c2cb947df5 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -118,6 +118,9 @@ Needs to be updated if more operations are defined in future.*/
118 118
119#define FIRST_NFS4_OP OP_ACCESS 119#define FIRST_NFS4_OP OP_ACCESS
120#define LAST_NFS4_OP OP_RECLAIM_COMPLETE 120#define LAST_NFS4_OP OP_RECLAIM_COMPLETE
121#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
122#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
123#define LAST_NFS42_OP OP_RECLAIM_COMPLETE
121 124
122enum nfsstat4 { 125enum nfsstat4 {
123 NFS4_OK = 0, 126 NFS4_OK = 0,
@@ -395,7 +398,9 @@ enum lock_type4 {
395#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) 398#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
396#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) 399#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
397#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) 400#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
398#define FATTR4_WORD2_SECURITY_LABEL (1UL << 17) 401#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
402#define FATTR4_WORD2_CHANGE_SECURITY_LABEL \
403 (1UL << 17)
399 404
400/* MDS threshold bitmap bits */ 405/* MDS threshold bitmap bits */
401#define THRESHOLD_RD (1UL << 0) 406#define THRESHOLD_RD (1UL << 0)
@@ -408,16 +413,6 @@ enum lock_type4 {
408#define NFS4_VERSION 4 413#define NFS4_VERSION 4
409#define NFS4_MINOR_VERSION 0 414#define NFS4_MINOR_VERSION 0
410 415
411#if defined(CONFIG_NFS_V4_2)
412#define NFS4_MAX_MINOR_VERSION 2
413#else
414#if defined(CONFIG_NFS_V4_1)
415#define NFS4_MAX_MINOR_VERSION 1
416#else
417#define NFS4_MAX_MINOR_VERSION 0
418#endif /* CONFIG_NFS_V4_1 */
419#endif /* CONFIG_NFS_V4_2 */
420
421#define NFS4_DEBUG 1 416#define NFS4_DEBUG 1
422 417
423/* Index of predefined Linux client operations */ 418/* Index of predefined Linux client operations */
@@ -460,6 +455,7 @@ enum {
460 NFSPROC4_CLNT_FS_LOCATIONS, 455 NFSPROC4_CLNT_FS_LOCATIONS,
461 NFSPROC4_CLNT_RELEASE_LOCKOWNER, 456 NFSPROC4_CLNT_RELEASE_LOCKOWNER,
462 NFSPROC4_CLNT_SECINFO, 457 NFSPROC4_CLNT_SECINFO,
458 NFSPROC4_CLNT_FSID_PRESENT,
463 459
464 /* nfs41 */ 460 /* nfs41 */
465 NFSPROC4_CLNT_EXCHANGE_ID, 461 NFSPROC4_CLNT_EXCHANGE_ID,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 3ea4cde8701c..48997374eaf0 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -269,9 +269,13 @@ static inline int NFS_STALE(const struct inode *inode)
269 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 269 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
270} 270}
271 271
272static inline int NFS_FSCACHE(const struct inode *inode) 272static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
273{ 273{
274 return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags); 274#ifdef CONFIG_NFS_FSCACHE
275 return NFS_I(inode)->fscache;
276#else
277 return NULL;
278#endif
275} 279}
276 280
277static inline __u64 NFS_FILEID(const struct inode *inode) 281static inline __u64 NFS_FILEID(const struct inode *inode)
@@ -503,24 +507,6 @@ extern int nfs_mountpoint_expiry_timeout;
503extern void nfs_release_automount_timer(void); 507extern void nfs_release_automount_timer(void);
504 508
505/* 509/*
506 * linux/fs/nfs/nfs4proc.c
507 */
508#ifdef CONFIG_NFS_V4_SECURITY_LABEL
509extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
510static inline void nfs4_label_free(struct nfs4_label *label)
511{
512 if (label) {
513 kfree(label->label);
514 kfree(label);
515 }
516 return;
517}
518#else
519static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
520static inline void nfs4_label_free(void *label) {}
521#endif
522
523/*
524 * linux/fs/nfs/unlink.c 510 * linux/fs/nfs/unlink.c
525 */ 511 */
526extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); 512extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b8cedced50c9..1150ea41b626 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -41,6 +41,7 @@ struct nfs_client {
41#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */ 41#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */
42#define NFS_CS_MIGRATION 2 /* - transparent state migr */ 42#define NFS_CS_MIGRATION 2 /* - transparent state migr */
43#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */ 43#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
44#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
44 struct sockaddr_storage cl_addr; /* server identifier */ 45 struct sockaddr_storage cl_addr; /* server identifier */
45 size_t cl_addrlen; 46 size_t cl_addrlen;
46 char * cl_hostname; /* hostname of server */ 47 char * cl_hostname; /* hostname of server */
@@ -78,6 +79,7 @@ struct nfs_client {
78 char cl_ipaddr[48]; 79 char cl_ipaddr[48];
79 u32 cl_cb_ident; /* v4.0 callback identifier */ 80 u32 cl_cb_ident; /* v4.0 callback identifier */
80 const struct nfs4_minor_version_ops *cl_mvops; 81 const struct nfs4_minor_version_ops *cl_mvops;
82 unsigned long cl_mig_gen;
81 83
82 /* NFSv4.0 transport blocking */ 84 /* NFSv4.0 transport blocking */
83 struct nfs4_slot_table *cl_slot_tbl; 85 struct nfs4_slot_table *cl_slot_tbl;
@@ -147,7 +149,9 @@ struct nfs_server {
147 __u64 maxfilesize; /* maximum file size */ 149 __u64 maxfilesize; /* maximum file size */
148 struct timespec time_delta; /* smallest time granularity */ 150 struct timespec time_delta; /* smallest time granularity */
149 unsigned long mount_time; /* when this fs was mounted */ 151 unsigned long mount_time; /* when this fs was mounted */
152 struct super_block *super; /* VFS super block */
150 dev_t s_dev; /* superblock dev numbers */ 153 dev_t s_dev; /* superblock dev numbers */
154 struct nfs_auth_info auth_info; /* parsed auth flavors */
151 155
152#ifdef CONFIG_NFS_FSCACHE 156#ifdef CONFIG_NFS_FSCACHE
153 struct nfs_fscache_key *fscache_key; /* unique key for superblock */ 157 struct nfs_fscache_key *fscache_key; /* unique key for superblock */
@@ -187,6 +191,12 @@ struct nfs_server {
187 struct list_head state_owners_lru; 191 struct list_head state_owners_lru;
188 struct list_head layouts; 192 struct list_head layouts;
189 struct list_head delegations; 193 struct list_head delegations;
194
195 unsigned long mig_gen;
196 unsigned long mig_status;
197#define NFS_MIG_IN_TRANSITION (1)
198#define NFS_MIG_FAILED (2)
199
190 void (*destroy)(struct nfs_server *); 200 void (*destroy)(struct nfs_server *);
191 201
192 atomic_t active; /* Keep trace of any activity to this server */ 202 atomic_t active; /* Keep trace of any activity to this server */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 49f52c8f4422..3ccfcecf8999 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -591,6 +591,13 @@ struct nfs_renameres {
591 struct nfs_fattr *new_fattr; 591 struct nfs_fattr *new_fattr;
592}; 592};
593 593
594/* parsed sec= options */
595#define NFS_AUTH_INFO_MAX_FLAVORS 12 /* see fs/nfs/super.c */
596struct nfs_auth_info {
597 unsigned int flavor_len;
598 rpc_authflavor_t flavors[NFS_AUTH_INFO_MAX_FLAVORS];
599};
600
594/* 601/*
595 * Argument struct for decode_entry function 602 * Argument struct for decode_entry function
596 */ 603 */
@@ -1053,14 +1060,18 @@ struct nfs4_fs_locations {
1053struct nfs4_fs_locations_arg { 1060struct nfs4_fs_locations_arg {
1054 struct nfs4_sequence_args seq_args; 1061 struct nfs4_sequence_args seq_args;
1055 const struct nfs_fh *dir_fh; 1062 const struct nfs_fh *dir_fh;
1063 const struct nfs_fh *fh;
1056 const struct qstr *name; 1064 const struct qstr *name;
1057 struct page *page; 1065 struct page *page;
1058 const u32 *bitmask; 1066 const u32 *bitmask;
1067 clientid4 clientid;
1068 unsigned char migration:1, renew:1;
1059}; 1069};
1060 1070
1061struct nfs4_fs_locations_res { 1071struct nfs4_fs_locations_res {
1062 struct nfs4_sequence_res seq_res; 1072 struct nfs4_sequence_res seq_res;
1063 struct nfs4_fs_locations *fs_locations; 1073 struct nfs4_fs_locations *fs_locations;
1074 unsigned char migration:1, renew:1;
1064}; 1075};
1065 1076
1066struct nfs4_secinfo4 { 1077struct nfs4_secinfo4 {
@@ -1084,6 +1095,19 @@ struct nfs4_secinfo_res {
1084 struct nfs4_secinfo_flavors *flavors; 1095 struct nfs4_secinfo_flavors *flavors;
1085}; 1096};
1086 1097
1098struct nfs4_fsid_present_arg {
1099 struct nfs4_sequence_args seq_args;
1100 const struct nfs_fh *fh;
1101 clientid4 clientid;
1102 unsigned char renew:1;
1103};
1104
1105struct nfs4_fsid_present_res {
1106 struct nfs4_sequence_res seq_res;
1107 struct nfs_fh *fh;
1108 unsigned char renew:1;
1109};
1110
1087#endif /* CONFIG_NFS_V4 */ 1111#endif /* CONFIG_NFS_V4 */
1088 1112
1089struct nfstime4 { 1113struct nfstime4 {
diff --git a/include/linux/of.h b/include/linux/of.h
index f95aee391e30..276c546980d8 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
136 return of_read_number(cell, size); 136 return of_read_number(cell, size);
137} 137}
138 138
139#if defined(CONFIG_SPARC)
139#include <asm/prom.h> 140#include <asm/prom.h>
141#endif
140 142
141/* Default #address and #size cells. Allow arch asm/prom.h to override */ 143/* Default #address and #size cells. Allow arch asm/prom.h to override */
142#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT) 144#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
@@ -226,6 +228,19 @@ static inline int of_get_child_count(const struct device_node *np)
226 return num; 228 return num;
227} 229}
228 230
231static inline int of_get_available_child_count(const struct device_node *np)
232{
233 struct device_node *child;
234 int num = 0;
235
236 for_each_available_child_of_node(np, child)
237 num++;
238
239 return num;
240}
241
242/* cache lookup */
243extern struct device_node *of_find_next_cache_node(const struct device_node *);
229extern struct device_node *of_find_node_with_property( 244extern struct device_node *of_find_node_with_property(
230 struct device_node *from, const char *prop_name); 245 struct device_node *from, const char *prop_name);
231#define for_each_node_with_property(dn, prop_name) \ 246#define for_each_node_with_property(dn, prop_name) \
@@ -275,6 +290,7 @@ extern int of_n_size_cells(struct device_node *np);
275extern const struct of_device_id *of_match_node( 290extern const struct of_device_id *of_match_node(
276 const struct of_device_id *matches, const struct device_node *node); 291 const struct of_device_id *matches, const struct device_node *node);
277extern int of_modalias_node(struct device_node *node, char *modalias, int len); 292extern int of_modalias_node(struct device_node *node, char *modalias, int len);
293extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
278extern struct device_node *of_parse_phandle(const struct device_node *np, 294extern struct device_node *of_parse_phandle(const struct device_node *np,
279 const char *phandle_name, 295 const char *phandle_name,
280 int index); 296 int index);
@@ -364,6 +380,9 @@ static inline bool of_have_populated_dt(void)
364#define for_each_child_of_node(parent, child) \ 380#define for_each_child_of_node(parent, child) \
365 while (0) 381 while (0)
366 382
383#define for_each_available_child_of_node(parent, child) \
384 while (0)
385
367static inline struct device_node *of_get_child_by_name( 386static inline struct device_node *of_get_child_by_name(
368 const struct device_node *node, 387 const struct device_node *node,
369 const char *name) 388 const char *name)
@@ -376,6 +395,11 @@ static inline int of_get_child_count(const struct device_node *np)
376 return 0; 395 return 0;
377} 396}
378 397
398static inline int of_get_available_child_count(const struct device_node *np)
399{
400 return 0;
401}
402
379static inline int of_device_is_compatible(const struct device_node *device, 403static inline int of_device_is_compatible(const struct device_node *device,
380 const char *name) 404 const char *name)
381{ 405{
@@ -534,13 +558,10 @@ static inline const char *of_prop_next_string(struct property *prop,
534#define of_match_node(_matches, _node) NULL 558#define of_match_node(_matches, _node) NULL
535#endif /* CONFIG_OF */ 559#endif /* CONFIG_OF */
536 560
537#ifndef of_node_to_nid 561#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
538static inline int of_node_to_nid(struct device_node *np) 562extern int of_node_to_nid(struct device_node *np);
539{ 563#else
540 return numa_node_id(); 564static inline int of_node_to_nid(struct device_node *device) { return 0; }
541}
542
543#define of_node_to_nid of_node_to_nid
544#endif 565#endif
545 566
546/** 567/**
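
of.h above adds of_get_available_child_count() (with a !CONFIG_OF stub) alongside of_get_child_count(), and makes of_node_to_nid() a real function only when CONFIG_OF and CONFIG_NUMA are both set. A small usage sketch; example_count_ports and the -ENODEV policy are illustrative.

static int example_count_ports(struct device_node *np)
{
        /* Counts only children whose status is "okay"/"ok" (or absent). */
        int nr = of_get_available_child_count(np);

        return nr ? nr : -ENODEV;
}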
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 4c2e6f26432c..5f6ed6b182b8 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -34,6 +34,10 @@ static inline void of_pci_range_to_resource(struct of_pci_range *range,
34 res->name = np->full_name; 34 res->name = np->full_name;
35} 35}
36 36
37/* Translate a DMA address from device space to CPU space */
38extern u64 of_translate_dma_address(struct device_node *dev,
39 const __be32 *in_addr);
40
37#ifdef CONFIG_OF_ADDRESS 41#ifdef CONFIG_OF_ADDRESS
38extern u64 of_translate_address(struct device_node *np, const __be32 *addr); 42extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
39extern bool of_can_translate_address(struct device_node *dev); 43extern bool of_can_translate_address(struct device_node *dev);
@@ -52,10 +56,7 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
52extern const __be32 *of_get_address(struct device_node *dev, int index, 56extern const __be32 *of_get_address(struct device_node *dev, int index,
53 u64 *size, unsigned int *flags); 57 u64 *size, unsigned int *flags);
54 58
55#ifndef pci_address_to_pio 59extern unsigned long pci_address_to_pio(phys_addr_t addr);
56static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
57#define pci_address_to_pio pci_address_to_pio
58#endif
59 60
60extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, 61extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
61 struct device_node *node); 62 struct device_node *node);
@@ -63,13 +64,6 @@ extern struct of_pci_range *of_pci_range_parser_one(
63 struct of_pci_range_parser *parser, 64 struct of_pci_range_parser *parser,
64 struct of_pci_range *range); 65 struct of_pci_range *range);
65#else /* CONFIG_OF_ADDRESS */ 66#else /* CONFIG_OF_ADDRESS */
66#ifndef of_address_to_resource
67static inline int of_address_to_resource(struct device_node *dev, int index,
68 struct resource *r)
69{
70 return -EINVAL;
71}
72#endif
73static inline struct device_node *of_find_matching_node_by_address( 67static inline struct device_node *of_find_matching_node_by_address(
74 struct device_node *from, 68 struct device_node *from,
75 const struct of_device_id *matches, 69 const struct of_device_id *matches,
@@ -77,12 +71,7 @@ static inline struct device_node *of_find_matching_node_by_address(
77{ 71{
78 return NULL; 72 return NULL;
79} 73}
80#ifndef of_iomap 74
81static inline void __iomem *of_iomap(struct device_node *device, int index)
82{
83 return NULL;
84}
85#endif
86static inline const __be32 *of_get_address(struct device_node *dev, int index, 75static inline const __be32 *of_get_address(struct device_node *dev, int index,
87 u64 *size, unsigned int *flags) 76 u64 *size, unsigned int *flags)
88{ 77{
@@ -103,6 +92,22 @@ static inline struct of_pci_range *of_pci_range_parser_one(
103} 92}
104#endif /* CONFIG_OF_ADDRESS */ 93#endif /* CONFIG_OF_ADDRESS */
105 94
95#ifdef CONFIG_OF
96extern int of_address_to_resource(struct device_node *dev, int index,
97 struct resource *r);
98void __iomem *of_iomap(struct device_node *node, int index);
99#else
100static inline int of_address_to_resource(struct device_node *dev, int index,
101 struct resource *r)
102{
103 return -EINVAL;
104}
105
106static inline void __iomem *of_iomap(struct device_node *device, int index)
107{
108 return NULL;
109}
110#endif
106 111
107#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) 112#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
108extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, 113extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
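
of_address.h above consolidates of_address_to_resource() and of_iomap() under a single CONFIG_OF conditional instead of per-arch fallback definitions. A sketch of the usual consumer these declarations serve; example_map_regs is illustrative.

static void __iomem *example_map_regs(struct device_node *np)
{
        struct resource res;
        void __iomem *base;

        if (of_address_to_resource(np, 0, &res))   /* translate "reg" */
                return NULL;

        base = of_iomap(np, 0);                    /* or map it in one step */
        if (base)
                pr_debug("mapped %s: %pR\n", np->full_name, &res);
        return base;
}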
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index a478c62a2aab..0beaee9dac1f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -96,31 +96,30 @@ extern int of_scan_flat_dt_by_path(const char *path,
96 96
97extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, 97extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
98 int depth, void *data); 98 int depth, void *data);
99extern void early_init_dt_check_for_initrd(unsigned long node);
100extern int early_init_dt_scan_memory(unsigned long node, const char *uname, 99extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
101 int depth, void *data); 100 int depth, void *data);
102extern void early_init_dt_add_memory_arch(u64 base, u64 size); 101extern void early_init_dt_add_memory_arch(u64 base, u64 size);
103extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); 102extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
104extern u64 dt_mem_next_cell(int s, __be32 **cellp); 103extern u64 dt_mem_next_cell(int s, __be32 **cellp);
105 104
106/*
107 * If BLK_DEV_INITRD, the fdt early init code will call this function,
108 * to be provided by the arch code. start and end are specified as
109 * physical addresses.
110 */
111#ifdef CONFIG_BLK_DEV_INITRD
112extern void early_init_dt_setup_initrd_arch(u64 start, u64 end);
113#endif
114
115/* Early flat tree scan hooks */ 105/* Early flat tree scan hooks */
116extern int early_init_dt_scan_root(unsigned long node, const char *uname, 106extern int early_init_dt_scan_root(unsigned long node, const char *uname,
117 int depth, void *data); 107 int depth, void *data);
118 108
109extern bool early_init_dt_scan(void *params);
110
111extern const char *of_flat_dt_get_machine_name(void);
112extern const void *of_flat_dt_match_machine(const void *default_match,
113 const void * (*get_next_compat)(const char * const**));
114
119/* Other Prototypes */ 115/* Other Prototypes */
120extern void unflatten_device_tree(void); 116extern void unflatten_device_tree(void);
117extern void unflatten_and_copy_device_tree(void);
121extern void early_init_devtree(void *); 118extern void early_init_devtree(void *);
122#else /* CONFIG_OF_FLATTREE */ 119#else /* CONFIG_OF_FLATTREE */
120static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
123static inline void unflatten_device_tree(void) {} 121static inline void unflatten_device_tree(void) {}
122static inline void unflatten_and_copy_device_tree(void) {}
124#endif /* CONFIG_OF_FLATTREE */ 123#endif /* CONFIG_OF_FLATTREE */
125 124
126#endif /* __ASSEMBLY__ */ 125#endif /* __ASSEMBLY__ */
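
of_fdt.h above drops the per-arch initrd hook and exports early_init_dt_scan(), of_flat_dt_get_machine_name(), of_flat_dt_match_machine() and unflatten_and_copy_device_tree(), letting architectures share one FDT boot path. A sketch of that sequence as architecture setup code might call it; the function name and the panic policy are illustrative.

void __init example_setup_machine_fdt(void *dt_virt)
{
        if (!early_init_dt_scan(dt_virt))
                panic("invalid device tree blob");

        pr_info("Machine: %s\n", of_flat_dt_get_machine_name());

        /* Copy the blob out of early memory and build the device_node tree. */
        unflatten_and_copy_device_tree();
}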
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index a83dc6f5008e..f14123a5a9df 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -19,6 +19,7 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/gpio.h> 20#include <linux/gpio.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/gpio/consumer.h>
22 23
23struct device_node; 24struct device_node;
24 25
@@ -47,7 +48,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
47 return container_of(gc, struct of_mm_gpio_chip, gc); 48 return container_of(gc, struct of_mm_gpio_chip, gc);
48} 49}
49 50
50extern int of_get_named_gpio_flags(struct device_node *np, 51extern struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
51 const char *list_name, int index, enum of_gpio_flags *flags); 52 const char *list_name, int index, enum of_gpio_flags *flags);
52 53
53extern int of_mm_gpiochip_add(struct device_node *np, 54extern int of_mm_gpiochip_add(struct device_node *np,
@@ -62,10 +63,10 @@ extern int of_gpio_simple_xlate(struct gpio_chip *gc,
62#else /* CONFIG_OF_GPIO */ 63#else /* CONFIG_OF_GPIO */
63 64
64/* Drivers may not strictly depend on the GPIO support, so let them link. */ 65/* Drivers may not strictly depend on the GPIO support, so let them link. */
65static inline int of_get_named_gpio_flags(struct device_node *np, 66static inline struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
66 const char *list_name, int index, enum of_gpio_flags *flags) 67 const char *list_name, int index, enum of_gpio_flags *flags)
67{ 68{
68 return -ENOSYS; 69 return ERR_PTR(-ENOSYS);
69} 70}
70 71
71static inline int of_gpio_simple_xlate(struct gpio_chip *gc, 72static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
@@ -80,6 +81,18 @@ static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
80 81
81#endif /* CONFIG_OF_GPIO */ 82#endif /* CONFIG_OF_GPIO */
82 83
84static inline int of_get_named_gpio_flags(struct device_node *np,
85 const char *list_name, int index, enum of_gpio_flags *flags)
86{
87 struct gpio_desc *desc;
88 desc = of_get_named_gpiod_flags(np, list_name, index, flags);
89
90 if (IS_ERR(desc))
91 return PTR_ERR(desc);
92 else
93 return desc_to_gpio(desc);
94}
95
83/** 96/**
84 * of_gpio_named_count() - Count GPIOs for a device 97 * of_gpio_named_count() - Count GPIOs for a device
85 * @np: device node to count GPIOs for 98 * @np: device node to count GPIOs for
@@ -117,15 +130,21 @@ static inline int of_gpio_count(struct device_node *np)
117} 130}
118 131
119/** 132/**
120 * of_get_gpio_flags() - Get a GPIO number and flags to use with GPIO API 133 * of_get_gpiod_flags() - Get a GPIO descriptor and flags to use with GPIO API
121 * @np: device node to get GPIO from 134 * @np: device node to get GPIO from
122 * @index: index of the GPIO 135 * @index: index of the GPIO
123 * @flags: a flags pointer to fill in 136 * @flags: a flags pointer to fill in
124 * 137 *
125 * Returns GPIO number to use with Linux generic GPIO API, or one of the errno 138 * Returns GPIO descriptor to use with Linux generic GPIO API, or a errno
126 * value on the error condition. If @flags is not NULL the function also fills 139 * value on the error condition. If @flags is not NULL the function also fills
127 * in flags for the GPIO. 140 * in flags for the GPIO.
128 */ 141 */
142static inline struct gpio_desc *of_get_gpiod_flags(struct device_node *np,
143 int index, enum of_gpio_flags *flags)
144{
145 return of_get_named_gpiod_flags(np, "gpios", index, flags);
146}
147
129static inline int of_get_gpio_flags(struct device_node *np, int index, 148static inline int of_get_gpio_flags(struct device_node *np, int index,
130 enum of_gpio_flags *flags) 149 enum of_gpio_flags *flags)
131{ 150{
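A minimal consumer sketch of the reworked of_gpio.h interface above, assuming a made-up "reset-gpios" property and label: the integer-based of_get_named_gpio_flags() wrapper now funnels through of_get_named_gpiod_flags() and converts descriptor errors back into errnos, so legacy callers keep working unchanged.

#include <linux/gpio.h>
#include <linux/of_gpio.h>

/* Illustrative only; property name, label and polarity handling are assumptions. */
static int example_request_reset_gpio(struct device_node *np)
{
	enum of_gpio_flags flags;
	int gpio, ret;

	gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
	if (!gpio_is_valid(gpio))
		return gpio;		/* negative errno from the gpiod lookup */

	/* drive the line to its inactive level, honouring DT polarity */
	ret = gpio_request_one(gpio, (flags & OF_GPIO_ACTIVE_LOW) ?
			       GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			       "example-reset");
	return ret ? ret : gpio;
}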
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index fcd63baee5f2..3f23b4472c31 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -8,22 +8,6 @@
8#include <linux/ioport.h> 8#include <linux/ioport.h>
9#include <linux/of.h> 9#include <linux/of.h>
10 10
11/**
12 * of_irq - container for device_node/irq_specifier pair for an irq controller
13 * @controller: pointer to interrupt controller device tree node
14 * @size: size of interrupt specifier
15 * @specifier: array of cells @size long specifying the specific interrupt
16 *
17 * This structure is returned when an interrupt is mapped. The controller
18 * field needs to be put() after use
19 */
20#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
21struct of_irq {
22 struct device_node *controller; /* Interrupt controller node */
23 u32 size; /* Specifier size */
24 u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
25};
26
27typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); 11typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
28 12
29/* 13/*
@@ -35,35 +19,38 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
35#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) 19#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
36extern unsigned int of_irq_workarounds; 20extern unsigned int of_irq_workarounds;
37extern struct device_node *of_irq_dflt_pic; 21extern struct device_node *of_irq_dflt_pic;
38extern int of_irq_map_oldworld(struct device_node *device, int index, 22extern int of_irq_parse_oldworld(struct device_node *device, int index,
39 struct of_irq *out_irq); 23 struct of_phandle_args *out_irq);
40#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ 24#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
41#define of_irq_workarounds (0) 25#define of_irq_workarounds (0)
42#define of_irq_dflt_pic (NULL) 26#define of_irq_dflt_pic (NULL)
43static inline int of_irq_map_oldworld(struct device_node *device, int index, 27static inline int of_irq_parse_oldworld(struct device_node *device, int index,
44 struct of_irq *out_irq) 28 struct of_phandle_args *out_irq)
45{ 29{
46 return -EINVAL; 30 return -EINVAL;
47} 31}
48#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ 32#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
49 33
50 34extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
51extern int of_irq_map_raw(struct device_node *parent, const __be32 *intspec, 35extern int of_irq_parse_one(struct device_node *device, int index,
52 u32 ointsize, const __be32 *addr, 36 struct of_phandle_args *out_irq);
53 struct of_irq *out_irq); 37extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
54extern int of_irq_map_one(struct device_node *device, int index,
55 struct of_irq *out_irq);
56extern unsigned int irq_create_of_mapping(struct device_node *controller,
57 const u32 *intspec,
58 unsigned int intsize);
59extern int of_irq_to_resource(struct device_node *dev, int index, 38extern int of_irq_to_resource(struct device_node *dev, int index,
60 struct resource *r); 39 struct resource *r);
61extern int of_irq_count(struct device_node *dev);
62extern int of_irq_to_resource_table(struct device_node *dev, 40extern int of_irq_to_resource_table(struct device_node *dev,
63 struct resource *res, int nr_irqs); 41 struct resource *res, int nr_irqs);
64 42
65extern void of_irq_init(const struct of_device_id *matches); 43extern void of_irq_init(const struct of_device_id *matches);
66 44
45#ifdef CONFIG_OF_IRQ
46extern int of_irq_count(struct device_node *dev);
47#else
48static inline int of_irq_count(struct device_node *dev)
49{
50 return 0;
51}
52#endif
53
67#if defined(CONFIG_OF) 54#if defined(CONFIG_OF)
68/* 55/*
69 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC 56 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
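To make the new split visible, here is a hedged sketch (not part of the patch) of how the of_phandle_args based pair replaces the old of_irq_map_one()/irq_create_of_mapping() combination; most drivers would still call irq_of_parse_and_map() and never see the two halves separately.

#include <linux/of_irq.h>

/* Sketch: resolve and map a device's interrupt at @index. */
static unsigned int example_parse_and_map(struct device_node *np, int index)
{
	struct of_phandle_args oirq;

	/* walks interrupt-parent/interrupt-map and fills controller + cells */
	if (of_irq_parse_one(np, index, &oirq))
		return 0;

	/* 0 means no Linux IRQ could be created */
	return irq_create_of_mapping(&oirq);
}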
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
index ed7f267e6389..6f10e938ff7e 100644
--- a/include/linux/of_mtd.h
+++ b/include/linux/of_mtd.h
@@ -10,10 +10,29 @@
10#define __LINUX_OF_NET_H 10#define __LINUX_OF_NET_H
11 11
12#ifdef CONFIG_OF_MTD 12#ifdef CONFIG_OF_MTD
13
13#include <linux/of.h> 14#include <linux/of.h>
14int of_get_nand_ecc_mode(struct device_node *np); 15int of_get_nand_ecc_mode(struct device_node *np);
15int of_get_nand_bus_width(struct device_node *np); 16int of_get_nand_bus_width(struct device_node *np);
16bool of_get_nand_on_flash_bbt(struct device_node *np); 17bool of_get_nand_on_flash_bbt(struct device_node *np);
17#endif 18
19#else /* CONFIG_OF_MTD */
20
21static inline int of_get_nand_ecc_mode(struct device_node *np)
22{
23 return -ENOSYS;
24}
25
26static inline int of_get_nand_bus_width(struct device_node *np)
27{
28 return -ENOSYS;
29}
30
31static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
32{
33 return false;
34}
35
36#endif /* CONFIG_OF_MTD */
18 37
19#endif /* __LINUX_OF_MTD_H */ 38#endif /* __LINUX_OF_MTD_H */
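The point of the new !CONFIG_OF_MTD stubs is that NAND drivers can call these helpers unconditionally and simply keep their defaults when the stubs fire. A rough sketch; the nand_chip field names come from the 3.13 NAND core, the rest is assumed.

#include <linux/mtd/nand.h>
#include <linux/of_mtd.h>

static void example_apply_dt_nand_config(struct nand_chip *chip,
					 struct device_node *np)
{
	int ecc_mode = of_get_nand_ecc_mode(np);  /* -ENOSYS stub when !CONFIG_OF_MTD */

	if (ecc_mode >= 0)
		chip->ecc.mode = ecc_mode;
	if (of_get_nand_bus_width(np) == 16)
		chip->options |= NAND_BUSWIDTH_16;
	if (of_get_nand_on_flash_bbt(np))
		chip->bbt_options |= NAND_BBT_USE_FLASH;
}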
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index fd9c408631a0..1a1f5ffd5288 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -5,8 +5,9 @@
5#include <linux/msi.h> 5#include <linux/msi.h>
6 6
7struct pci_dev; 7struct pci_dev;
8struct of_irq; 8struct of_phandle_args;
9int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq); 9int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
10int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
10 11
11struct device_node; 12struct device_node;
12struct device_node *of_pci_find_child_device(struct device_node *parent, 13struct device_node *of_pci_find_child_device(struct device_node *parent,
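The newly exported of_irq_parse_and_map_pci() takes the usual (dev, slot, pin) triple, so a DT-based host controller can hand legacy INTx routing straight to the OF core. A minimal, assumed wrapper:

#include <linux/of_pci.h>

/* Usable as a host bridge's map_irq hook; returns a Linux IRQ, or 0/negative on failure. */
static int example_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return of_irq_parse_and_map_pci(dev, slot, pin);
}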
diff --git a/include/linux/oom.h b/include/linux/oom.h
index da60007075b5..4cd62677feb9 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -82,6 +82,11 @@ static inline void oom_killer_enable(void)
82 oom_killer_disabled = false; 82 oom_killer_disabled = false;
83} 83}
84 84
85static inline bool oom_gfp_allowed(gfp_t gfp_mask)
86{
87 return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
88}
89
85extern struct task_struct *find_lock_task_mm(struct task_struct *p); 90extern struct task_struct *find_lock_task_mm(struct task_struct *p);
86 91
87/* sysctls */ 92/* sysctls */
diff --git a/include/linux/opp.h b/include/linux/opp.h
deleted file mode 100644
index 3aca2b8def33..000000000000
--- a/include/linux/opp.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef __LINUX_OPP_H__
15#define __LINUX_OPP_H__
16
17#include <linux/err.h>
18#include <linux/cpufreq.h>
19#include <linux/notifier.h>
20
21struct opp;
22struct device;
23
24enum opp_event {
25 OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
26};
27
28#if defined(CONFIG_PM_OPP)
29
30unsigned long opp_get_voltage(struct opp *opp);
31
32unsigned long opp_get_freq(struct opp *opp);
33
34int opp_get_opp_count(struct device *dev);
35
36struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
37 bool available);
38
39struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq);
40
41struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq);
42
43int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
44
45int opp_enable(struct device *dev, unsigned long freq);
46
47int opp_disable(struct device *dev, unsigned long freq);
48
49struct srcu_notifier_head *opp_get_notifier(struct device *dev);
50#else
51static inline unsigned long opp_get_voltage(struct opp *opp)
52{
53 return 0;
54}
55
56static inline unsigned long opp_get_freq(struct opp *opp)
57{
58 return 0;
59}
60
61static inline int opp_get_opp_count(struct device *dev)
62{
63 return 0;
64}
65
66static inline struct opp *opp_find_freq_exact(struct device *dev,
67 unsigned long freq, bool available)
68{
69 return ERR_PTR(-EINVAL);
70}
71
72static inline struct opp *opp_find_freq_floor(struct device *dev,
73 unsigned long *freq)
74{
75 return ERR_PTR(-EINVAL);
76}
77
78static inline struct opp *opp_find_freq_ceil(struct device *dev,
79 unsigned long *freq)
80{
81 return ERR_PTR(-EINVAL);
82}
83
84static inline int opp_add(struct device *dev, unsigned long freq,
85 unsigned long u_volt)
86{
87 return -EINVAL;
88}
89
90static inline int opp_enable(struct device *dev, unsigned long freq)
91{
92 return 0;
93}
94
95static inline int opp_disable(struct device *dev, unsigned long freq)
96{
97 return 0;
98}
99
100static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
101{
102 return ERR_PTR(-EINVAL);
103}
104#endif /* CONFIG_PM_OPP */
105
106#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
107int of_init_opp_table(struct device *dev);
108#else
109static inline int of_init_opp_table(struct device *dev)
110{
111 return -EINVAL;
112}
113#endif
114
115#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
116int opp_init_cpufreq_table(struct device *dev,
117 struct cpufreq_frequency_table **table);
118void opp_free_cpufreq_table(struct device *dev,
119 struct cpufreq_frequency_table **table);
120#else
121static inline int opp_init_cpufreq_table(struct device *dev,
122 struct cpufreq_frequency_table **table)
123{
124 return -EINVAL;
125}
126
127static inline
128void opp_free_cpufreq_table(struct device *dev,
129 struct cpufreq_frequency_table **table)
130{
131}
132#endif /* CONFIG_CPU_FREQ */
133
134#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 86292beebfe2..438694650471 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -129,10 +129,9 @@ struct parallel_data {
129 struct padata_serial_queue __percpu *squeue; 129 struct padata_serial_queue __percpu *squeue;
130 atomic_t reorder_objects; 130 atomic_t reorder_objects;
131 atomic_t refcnt; 131 atomic_t refcnt;
132 atomic_t seq_nr;
132 struct padata_cpumask cpumask; 133 struct padata_cpumask cpumask;
133 spinlock_t lock ____cacheline_aligned; 134 spinlock_t lock ____cacheline_aligned;
134 spinlock_t seq_lock;
135 unsigned int seq_nr;
136 unsigned int processed; 135 unsigned int processed;
137 struct timer_list timer; 136 struct timer_list timer;
138}; 137};
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 93506a114034..da523661500a 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -38,10 +38,10 @@
38 * The last is when there is insufficient space in page->flags and a separate 38 * The last is when there is insufficient space in page->flags and a separate
39 * lookup is necessary. 39 * lookup is necessary.
40 * 40 *
41 * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS | 41 * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
42 * " plus space for last_nid: | NODE | ZONE | LAST_NID ... | FLAGS | 42 * " plus space for last_cpupid: | NODE | ZONE | LAST_CPUPID ... | FLAGS |
43 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS | 43 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
44 * " plus space for last_nid: | SECTION | NODE | ZONE | LAST_NID ... | FLAGS | 44 * " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
45 * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS | 45 * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
46 */ 46 */
47#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 47#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -62,15 +62,21 @@
62#endif 62#endif
63 63
64#ifdef CONFIG_NUMA_BALANCING 64#ifdef CONFIG_NUMA_BALANCING
65#define LAST_NID_SHIFT NODES_SHIFT 65#define LAST__PID_SHIFT 8
66#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)
67
68#define LAST__CPU_SHIFT NR_CPUS_BITS
69#define LAST__CPU_MASK ((1 << LAST__CPU_SHIFT)-1)
70
71#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
66#else 72#else
67#define LAST_NID_SHIFT 0 73#define LAST_CPUPID_SHIFT 0
68#endif 74#endif
69 75
70#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_NID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS 76#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
71#define LAST_NID_WIDTH LAST_NID_SHIFT 77#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
72#else 78#else
73#define LAST_NID_WIDTH 0 79#define LAST_CPUPID_WIDTH 0
74#endif 80#endif
75 81
76/* 82/*
@@ -81,8 +87,8 @@
81#define NODE_NOT_IN_PAGE_FLAGS 87#define NODE_NOT_IN_PAGE_FLAGS
82#endif 88#endif
83 89
84#if defined(CONFIG_NUMA_BALANCING) && LAST_NID_WIDTH == 0 90#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
85#define LAST_NID_NOT_IN_PAGE_FLAGS 91#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
86#endif 92#endif
87 93
88#endif /* _LINUX_PAGE_FLAGS_LAYOUT */ 94#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
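For readers following the LAST_NID -> LAST_CPUPID conversion: the new field packs a CPU number above an 8-bit PID hash. The kernel's real accessors (cpu_pid_to_cpupid() and friends) live elsewhere; the sketch below only spells out the bit layout implied by the masks above, assuming CONFIG_NUMA_BALANCING.

static inline int example_cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) |
	       (pid & LAST__PID_MASK);
}

static inline int example_cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int example_cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}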
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6d53675c2b54..98ada58f9942 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -329,7 +329,9 @@ static inline void set_page_writeback(struct page *page)
329 * System with lots of page flags available. This allows separate 329 * System with lots of page flags available. This allows separate
330 * flags for PageHead() and PageTail() checks of compound pages so that bit 330 * flags for PageHead() and PageTail() checks of compound pages so that bit
331 * tests can be used in performance sensitive paths. PageCompound is 331 * tests can be used in performance sensitive paths. PageCompound is
332 * generally not used in hot code paths. 332 * generally not used in hot code paths except arch/powerpc/mm/init_64.c
333 * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
334 * and avoid handling those in real mode.
333 */ 335 */
334__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) 336__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
335__PAGEFLAG(Tail, tail) 337__PAGEFLAG(Tail, tail)
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index d006f0ca60f4..5a462c4e5009 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -27,7 +27,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
27 while (!pci_is_root_bus(pbus)) 27 while (!pci_is_root_bus(pbus))
28 pbus = pbus->parent; 28 pbus = pbus->parent;
29 29
30 return DEVICE_ACPI_HANDLE(pbus->bridge); 30 return ACPI_HANDLE(pbus->bridge);
31} 31}
32 32
33static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) 33static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
@@ -39,7 +39,7 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
39 else 39 else
40 dev = &pbus->self->dev; 40 dev = &pbus->self->dev;
41 41
42 return DEVICE_ACPI_HANDLE(dev); 42 return ACPI_HANDLE(dev);
43} 43}
44 44
45void acpi_pci_add_bus(struct pci_bus *bus); 45void acpi_pci_add_bus(struct pci_bus *bus);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index da172f956ad6..a13d6825e586 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -32,7 +32,6 @@
32#include <linux/irqreturn.h> 32#include <linux/irqreturn.h>
33#include <uapi/linux/pci.h> 33#include <uapi/linux/pci.h>
34 34
35/* Include the ID list */
36#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
37 36
38/* 37/*
@@ -42,9 +41,10 @@
42 * 41 *
43 * 7:3 = slot 42 * 7:3 = slot
44 * 2:0 = function 43 * 2:0 = function
45 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined uapi/linux/pci.h 44 *
45 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
46 * In the interest of not exposing interfaces to user-space unnecessarily, 46 * In the interest of not exposing interfaces to user-space unnecessarily,
47 * the following kernel only defines are being added here. 47 * the following kernel-only defines are being added here.
48 */ 48 */
49#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn) 49#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn)
50/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ 50/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
@@ -153,10 +153,10 @@ enum pcie_reset_state {
153 /* Reset is NOT asserted (Use to deassert reset) */ 153 /* Reset is NOT asserted (Use to deassert reset) */
154 pcie_deassert_reset = (__force pcie_reset_state_t) 1, 154 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
155 155
156 /* Use #PERST to reset PCI-E device */ 156 /* Use #PERST to reset PCIe device */
157 pcie_warm_reset = (__force pcie_reset_state_t) 2, 157 pcie_warm_reset = (__force pcie_reset_state_t) 2,
158 158
159 /* Use PCI-E Hot Reset to reset device */ 159 /* Use PCIe Hot Reset to reset device */
160 pcie_hot_reset = (__force pcie_reset_state_t) 3 160 pcie_hot_reset = (__force pcie_reset_state_t) 3
161}; 161};
162 162
@@ -259,13 +259,13 @@ struct pci_dev {
259 unsigned int class; /* 3 bytes: (base,sub,prog-if) */ 259 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
260 u8 revision; /* PCI revision, low byte of class word */ 260 u8 revision; /* PCI revision, low byte of class word */
261 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 261 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
262 u8 pcie_cap; /* PCI-E capability offset */ 262 u8 pcie_cap; /* PCIe capability offset */
263 u8 msi_cap; /* MSI capability offset */ 263 u8 msi_cap; /* MSI capability offset */
264 u8 msix_cap; /* MSI-X capability offset */ 264 u8 msix_cap; /* MSI-X capability offset */
265 u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ 265 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
266 u8 rom_base_reg; /* which config register controls the ROM */ 266 u8 rom_base_reg; /* which config register controls the ROM */
267 u8 pin; /* which interrupt pin this device uses */ 267 u8 pin; /* which interrupt pin this device uses */
268 u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */ 268 u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
269 269
270 struct pci_driver *driver; /* which driver has allocated this device */ 270 struct pci_driver *driver; /* which driver has allocated this device */
271 u64 dma_mask; /* Mask of the bits of bus address this 271 u64 dma_mask; /* Mask of the bits of bus address this
@@ -300,7 +300,7 @@ struct pci_dev {
300 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ 300 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
301 301
302#ifdef CONFIG_PCIEASPM 302#ifdef CONFIG_PCIEASPM
303 struct pcie_link_state *link_state; /* ASPM link state. */ 303 struct pcie_link_state *link_state; /* ASPM link state */
304#endif 304#endif
305 305
306 pci_channel_state_t error_state; /* current connectivity state */ 306 pci_channel_state_t error_state; /* current connectivity state */
@@ -317,7 +317,7 @@ struct pci_dev {
317 317
318 bool match_driver; /* Skip attaching driver */ 318 bool match_driver; /* Skip attaching driver */
319 /* These fields are used by common fixups */ 319 /* These fields are used by common fixups */
320 unsigned int transparent:1; /* Transparent PCI bridge */ 320 unsigned int transparent:1; /* Subtractive decode PCI bridge */
321 unsigned int multifunction:1;/* Part of multi-function device */ 321 unsigned int multifunction:1;/* Part of multi-function device */
322 /* keep track of device state */ 322 /* keep track of device state */
323 unsigned int is_added:1; 323 unsigned int is_added:1;
@@ -326,12 +326,10 @@ struct pci_dev {
326 unsigned int block_cfg_access:1; /* config space access is blocked */ 326 unsigned int block_cfg_access:1; /* config space access is blocked */
327 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 327 unsigned int broken_parity_status:1; /* Device generates false positive parity */
328 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 328 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
329 unsigned int msi_enabled:1; 329 unsigned int msi_enabled:1;
330 unsigned int msix_enabled:1; 330 unsigned int msix_enabled:1;
331 unsigned int ari_enabled:1; /* ARI forwarding */ 331 unsigned int ari_enabled:1; /* ARI forwarding */
332 unsigned int is_managed:1; 332 unsigned int is_managed:1;
333 unsigned int is_pcie:1; /* Obsolete. Will be removed.
334 Use pci_is_pcie() instead */
335 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 333 unsigned int needs_freset:1; /* Dev requires fundamental reset */
336 unsigned int state_saved:1; 334 unsigned int state_saved:1;
337 unsigned int is_physfn:1; 335 unsigned int is_physfn:1;
@@ -373,7 +371,6 @@ static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
373 if (dev->is_virtfn) 371 if (dev->is_virtfn)
374 dev = dev->physfn; 372 dev = dev->physfn;
375#endif 373#endif
376
377 return dev; 374 return dev;
378} 375}
379 376
@@ -458,7 +455,7 @@ struct pci_bus {
458 char name[48]; 455 char name[48];
459 456
460 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ 457 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
461 pci_bus_flags_t bus_flags; /* Inherited by child busses */ 458 pci_bus_flags_t bus_flags; /* inherited by child buses */
462 struct device *bridge; 459 struct device *bridge;
463 struct device dev; 460 struct device dev;
464 struct bin_attribute *legacy_io; /* legacy I/O for this bus */ 461 struct bin_attribute *legacy_io; /* legacy I/O for this bus */
@@ -470,14 +467,27 @@ struct pci_bus {
470#define to_pci_bus(n) container_of(n, struct pci_bus, dev) 467#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
471 468
472/* 469/*
473 * Returns true if the pci bus is root (behind host-pci bridge), 470 * Returns true if the PCI bus is root (behind host-PCI bridge),
474 * false otherwise 471 * false otherwise
472 *
473 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
474 * This is incorrect because "virtual" buses added for SR-IOV (via
475 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
475 */ 476 */
476static inline bool pci_is_root_bus(struct pci_bus *pbus) 477static inline bool pci_is_root_bus(struct pci_bus *pbus)
477{ 478{
478 return !(pbus->parent); 479 return !(pbus->parent);
479} 480}
480 481
482static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
483{
484 dev = pci_physfn(dev);
485 if (pci_is_root_bus(dev->bus))
486 return NULL;
487
488 return dev->bus->self;
489}
490
481#ifdef CONFIG_PCI_MSI 491#ifdef CONFIG_PCI_MSI
482static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) 492static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
483{ 493{
@@ -499,7 +509,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
499#define PCIBIOS_BUFFER_TOO_SMALL 0x89 509#define PCIBIOS_BUFFER_TOO_SMALL 0x89
500 510
501/* 511/*
502 * Translate above to generic errno for passing back through non-pci. 512 * Translate above to generic errno for passing back through non-PCI code.
503 */ 513 */
504static inline int pcibios_err_to_errno(int err) 514static inline int pcibios_err_to_errno(int err)
505{ 515{
@@ -550,11 +560,12 @@ struct pci_dynids {
550 struct list_head list; /* for IDs added at runtime */ 560 struct list_head list; /* for IDs added at runtime */
551}; 561};
552 562
553/* ---------------------------------------------------------------- */ 563
554/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides 564/*
555 * a set of callbacks in struct pci_error_handlers, then that device driver 565 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
556 * will be notified of PCI bus errors, and will be driven to recovery 566 * a set of callbacks in struct pci_error_handlers, that device driver
557 * when an error occurs. 567 * will be notified of PCI bus errors, and will be driven to recovery
568 * when an error occurs.
558 */ 569 */
559 570
560typedef unsigned int __bitwise pci_ers_result_t; 571typedef unsigned int __bitwise pci_ers_result_t;
@@ -598,7 +609,6 @@ struct pci_error_handlers {
598 void (*resume)(struct pci_dev *dev); 609 void (*resume)(struct pci_dev *dev);
599}; 610};
600 611
601/* ---------------------------------------------------------------- */
602 612
603struct module; 613struct module;
604struct pci_driver { 614struct pci_driver {
@@ -702,10 +712,10 @@ extern enum pcie_bus_config_types pcie_bus_config;
702 712
703extern struct bus_type pci_bus_type; 713extern struct bus_type pci_bus_type;
704 714
705/* Do NOT directly access these two variables, unless you are arch specific pci 715/* Do NOT directly access these two variables, unless you are arch-specific PCI
706 * code, or pci core code. */ 716 * code, or PCI core code. */
707extern struct list_head pci_root_buses; /* list of all known PCI buses */ 717extern struct list_head pci_root_buses; /* list of all known PCI buses */
708/* Some device drivers need know if pci is initiated */ 718/* Some device drivers need to know if PCI is initiated */
709int no_pci_devices(void); 719int no_pci_devices(void);
710 720
711void pcibios_resource_survey_bus(struct pci_bus *bus); 721void pcibios_resource_survey_bus(struct pci_bus *bus);
@@ -713,7 +723,7 @@ void pcibios_add_bus(struct pci_bus *bus);
713void pcibios_remove_bus(struct pci_bus *bus); 723void pcibios_remove_bus(struct pci_bus *bus);
714void pcibios_fixup_bus(struct pci_bus *); 724void pcibios_fixup_bus(struct pci_bus *);
715int __must_check pcibios_enable_device(struct pci_dev *, int mask); 725int __must_check pcibios_enable_device(struct pci_dev *, int mask);
716/* Architecture specific versions may override this (weak) */ 726/* Architecture-specific versions may override this (weak) */
717char *pcibios_setup(char *str); 727char *pcibios_setup(char *str);
718 728
719/* Used only when drivers/pci/setup.c is used */ 729/* Used only when drivers/pci/setup.c is used */
@@ -950,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno);
950int __must_check pci_assign_resource(struct pci_dev *dev, int i); 960int __must_check pci_assign_resource(struct pci_dev *dev, int i);
951int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); 961int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
952int pci_select_bars(struct pci_dev *dev, unsigned long flags); 962int pci_select_bars(struct pci_dev *dev, unsigned long flags);
963bool pci_device_is_present(struct pci_dev *pdev);
953 964
954/* ROM control related routines */ 965/* ROM control related routines */
955int pci_enable_rom(struct pci_dev *pdev); 966int pci_enable_rom(struct pci_dev *pdev);
@@ -1247,7 +1258,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
1247 1258
1248/* 1259/*
1249 * PCI domain support. Sometimes called PCI segment (eg by ACPI), 1260 * PCI domain support. Sometimes called PCI segment (eg by ACPI),
1250 * a PCI domain is defined to be a set of PCI busses which share 1261 * a PCI domain is defined to be a set of PCI buses which share
1251 * configuration space. 1262 * configuration space.
1252 */ 1263 */
1253#ifdef CONFIG_PCI_DOMAINS 1264#ifdef CONFIG_PCI_DOMAINS
@@ -1557,65 +1568,65 @@ enum pci_fixup_pass {
1557/* Anonymous variables would be nice... */ 1568/* Anonymous variables would be nice... */
1558#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 1569#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1559 class_shift, hook) \ 1570 class_shift, hook) \
1560 static const struct pci_fixup __pci_fixup_##name __used \ 1571 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1561 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 1572 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1562 = { vendor, device, class, class_shift, hook }; 1573 = { vendor, device, class, class_shift, hook };
1563 1574
1564#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 1575#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1565 class_shift, hook) \ 1576 class_shift, hook) \
1566 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1577 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1567 vendor##device##hook, vendor, device, class, class_shift, hook) 1578 hook, vendor, device, class, class_shift, hook)
1568#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 1579#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1569 class_shift, hook) \ 1580 class_shift, hook) \
1570 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1581 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1571 vendor##device##hook, vendor, device, class, class_shift, hook) 1582 hook, vendor, device, class, class_shift, hook)
1572#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 1583#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1573 class_shift, hook) \ 1584 class_shift, hook) \
1574 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1585 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1575 vendor##device##hook, vendor, device, class, class_shift, hook) 1586 hook, vendor, device, class, class_shift, hook)
1576#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 1587#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1577 class_shift, hook) \ 1588 class_shift, hook) \
1578 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1589 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1579 vendor##device##hook, vendor, device, class, class_shift, hook) 1590 hook, vendor, device, class, class_shift, hook)
1580#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 1591#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1581 class_shift, hook) \ 1592 class_shift, hook) \
1582 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1593 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1583 resume##vendor##device##hook, vendor, device, class, \ 1594 resume##hook, vendor, device, class, \
1584 class_shift, hook) 1595 class_shift, hook)
1585#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 1596#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1586 class_shift, hook) \ 1597 class_shift, hook) \
1587 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1598 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1588 resume_early##vendor##device##hook, vendor, device, \ 1599 resume_early##hook, vendor, device, \
1589 class, class_shift, hook) 1600 class, class_shift, hook)
1590#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 1601#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1591 class_shift, hook) \ 1602 class_shift, hook) \
1592 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1603 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1593 suspend##vendor##device##hook, vendor, device, class, \ 1604 suspend##hook, vendor, device, class, \
1594 class_shift, hook) 1605 class_shift, hook)
1595 1606
1596#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1607#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1597 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1608 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1598 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1609 hook, vendor, device, PCI_ANY_ID, 0, hook)
1599#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 1610#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1600 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1611 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1601 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1612 hook, vendor, device, PCI_ANY_ID, 0, hook)
1602#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 1613#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1603 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1614 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1604 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1615 hook, vendor, device, PCI_ANY_ID, 0, hook)
1605#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 1616#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1606 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1617 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1607 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1618 hook, vendor, device, PCI_ANY_ID, 0, hook)
1608#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1619#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1609 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1620 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1610 resume##vendor##device##hook, vendor, device, \ 1621 resume##hook, vendor, device, \
1611 PCI_ANY_ID, 0, hook) 1622 PCI_ANY_ID, 0, hook)
1612#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1623#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1613 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1624 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1614 resume_early##vendor##device##hook, vendor, device, \ 1625 resume_early##hook, vendor, device, \
1615 PCI_ANY_ID, 0, hook) 1626 PCI_ANY_ID, 0, hook)
1616#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1627#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1617 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1628 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1618 suspend##vendor##device##hook, vendor, device, \ 1629 suspend##hook, vendor, device, \
1619 PCI_ANY_ID, 0, hook) 1630 PCI_ANY_ID, 0, hook)
1620 1631
1621#ifdef CONFIG_PCI_QUIRKS 1632#ifdef CONFIG_PCI_QUIRKS
@@ -1661,7 +1672,7 @@ extern u8 pci_cache_line_size;
1661extern unsigned long pci_hotplug_io_size; 1672extern unsigned long pci_hotplug_io_size;
1662extern unsigned long pci_hotplug_mem_size; 1673extern unsigned long pci_hotplug_mem_size;
1663 1674
1664/* Architecture specific versions may override these (weak) */ 1675/* Architecture-specific versions may override these (weak) */
1665int pcibios_add_platform_entries(struct pci_dev *dev); 1676int pcibios_add_platform_entries(struct pci_dev *dev);
1666void pcibios_disable_device(struct pci_dev *dev); 1677void pcibios_disable_device(struct pci_dev *dev);
1667void pcibios_set_master(struct pci_dev *dev); 1678void pcibios_set_master(struct pci_dev *dev);
@@ -1749,11 +1760,11 @@ static inline int pci_pcie_cap(struct pci_dev *dev)
1749 * pci_is_pcie - check if the PCI device is PCI Express capable 1760 * pci_is_pcie - check if the PCI device is PCI Express capable
1750 * @dev: PCI device 1761 * @dev: PCI device
1751 * 1762 *
1752 * Retrun true if the PCI device is PCI Express capable, false otherwise. 1763 * Returns: true if the PCI device is PCI Express capable, false otherwise.
1753 */ 1764 */
1754static inline bool pci_is_pcie(struct pci_dev *dev) 1765static inline bool pci_is_pcie(struct pci_dev *dev)
1755{ 1766{
1756 return !!pci_pcie_cap(dev); 1767 return pci_pcie_cap(dev);
1757} 1768}
1758 1769
1759/** 1770/**
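A small consumer sketch for the pci_upstream_bridge() helper added above: walking from a function up to its PCIe root port. Purely illustrative; the loop and naming are assumptions, not code from the patch.

#include <linux/pci.h>

static struct pci_dev *example_find_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);

	while (bridge && pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT)
		bridge = pci_upstream_bridge(bridge);

	return bridge;	/* NULL if @dev sits directly on a root bus */
}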
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 430dd963707b..a2e2f1d17e16 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -39,8 +39,8 @@
39 * @hardware_test: Called to run a specified hardware test on the specified 39 * @hardware_test: Called to run a specified hardware test on the specified
40 * slot. 40 * slot.
41 * @get_power_status: Called to get the current power status of a slot. 41 * @get_power_status: Called to get the current power status of a slot.
42 * If this field is NULL, the value passed in the struct hotplug_slot_info 42 * If this field is NULL, the value passed in the struct hotplug_slot_info
43 * will be used when this value is requested by a user. 43 * will be used when this value is requested by a user.
44 * @get_attention_status: Called to get the current attention status of a slot. 44 * @get_attention_status: Called to get the current attention status of a slot.
45 * If this field is NULL, the value passed in the struct hotplug_slot_info 45 * If this field is NULL, the value passed in the struct hotplug_slot_info
46 * will be used when this value is requested by a user. 46 * will be used when this value is requested by a user.
@@ -191,4 +191,3 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
191 191
192void pci_configure_slot(struct pci_dev *dev); 192void pci_configure_slot(struct pci_dev *dev);
193#endif 193#endif
194
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 9572669eea97..4f1089f2cc98 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -23,7 +23,7 @@
23#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) 23#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
24 24
25struct pcie_device { 25struct pcie_device {
26 int irq; /* Service IRQ/MSI/MSI-X Vector */ 26 int irq; /* Service IRQ/MSI/MSI-X Vector */
27 struct pci_dev *port; /* Root/Upstream/Downstream Port */ 27 struct pci_dev *port; /* Root/Upstream/Downstream Port */
28 u32 service; /* Port service this device represents */ 28 u32 service; /* Port service this device represents */
29 void *priv_data; /* Service Private Data */ 29 void *priv_data; /* Service Private Data */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890abe1f0..a5fc7d01aad6 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
69 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 69 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
70 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 70 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
71 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 71 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
72 extern __PCPU_ATTRS(sec) __typeof__(type) name; \
72 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ 73 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
73 __typeof__(type) name 74 __typeof__(type) name
74#else 75#else
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index c74088ab103b..9e4761caa80c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -375,22 +375,6 @@ do { \
375# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) 375# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
376#endif 376#endif
377 377
378#ifndef this_cpu_xor
379# ifndef this_cpu_xor_1
380# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
381# endif
382# ifndef this_cpu_xor_2
383# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
384# endif
385# ifndef this_cpu_xor_4
386# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
387# endif
388# ifndef this_cpu_xor_8
389# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
390# endif
391# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
392#endif
393
394#define _this_cpu_generic_add_return(pcp, val) \ 378#define _this_cpu_generic_add_return(pcp, val) \
395({ \ 379({ \
396 typeof(pcp) ret__; \ 380 typeof(pcp) ret__; \
@@ -629,22 +613,6 @@ do { \
629# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val)) 613# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
630#endif 614#endif
631 615
632#ifndef __this_cpu_xor
633# ifndef __this_cpu_xor_1
634# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
635# endif
636# ifndef __this_cpu_xor_2
637# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
638# endif
639# ifndef __this_cpu_xor_4
640# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
641# endif
642# ifndef __this_cpu_xor_8
643# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
644# endif
645# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
646#endif
647
648#define __this_cpu_generic_add_return(pcp, val) \ 616#define __this_cpu_generic_add_return(pcp, val) \
649({ \ 617({ \
650 __this_cpu_add(pcp, val); \ 618 __this_cpu_add(pcp, val); \
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
index 0b23edbee309..1900bd0fa639 100644
--- a/include/linux/percpu_ida.h
+++ b/include/linux/percpu_ida.h
@@ -16,6 +16,8 @@ struct percpu_ida {
16 * percpu_ida_init() 16 * percpu_ida_init()
17 */ 17 */
18 unsigned nr_tags; 18 unsigned nr_tags;
19 unsigned percpu_max_size;
20 unsigned percpu_batch_size;
19 21
20 struct percpu_ida_cpu __percpu *tag_cpu; 22 struct percpu_ida_cpu __percpu *tag_cpu;
21 23
@@ -51,10 +53,29 @@ struct percpu_ida {
51 } ____cacheline_aligned_in_smp; 53 } ____cacheline_aligned_in_smp;
52}; 54};
53 55
56/*
57 * Number of tags we move between the percpu freelist and the global freelist at
58 * a time
59 */
60#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
61/* Max size of percpu freelist */
62#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
63
54int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp); 64int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
55void percpu_ida_free(struct percpu_ida *pool, unsigned tag); 65void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
56 66
57void percpu_ida_destroy(struct percpu_ida *pool); 67void percpu_ida_destroy(struct percpu_ida *pool);
58int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags); 68int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
69 unsigned long max_size, unsigned long batch_size);
70static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
71{
72 return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
73 IDA_DEFAULT_PCPU_BATCH_MOVE);
74}
75
76typedef int (*percpu_ida_cb)(unsigned, void *);
77int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
78 void *data);
59 79
80unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
60#endif /* __PERCPU_IDA_H__ */ 81#endif /* __PERCPU_IDA_H__ */
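Usage sketch for the extended percpu_ida interface above: initialise a pool with explicit per-cpu cache and batch sizes, allocate and return one tag, then walk the free tags. The sizes and the callback are arbitrary examples, not recommended values.

#include <linux/gfp.h>
#include <linux/percpu_ida.h>

static int example_count_free(unsigned tag, void *data)
{
	(*(unsigned *)data)++;
	return 0;
}

static int example_pool_demo(struct percpu_ida *pool)
{
	unsigned free_tags = 0;
	int tag, err;

	err = __percpu_ida_init(pool, 1024, 48, 16);	/* nr_tags, max_size, batch */
	if (err)
		return err;

	tag = percpu_ida_alloc(pool, GFP_KERNEL);
	if (tag >= 0)
		percpu_ida_free(pool, tag);

	percpu_ida_for_each_free(pool, example_count_free, &free_tags);
	percpu_ida_destroy(pool);
	return 0;
}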
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8ba627c1d60..2e069d1288df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -584,6 +584,10 @@ struct perf_sample_data {
584 struct perf_regs_user regs_user; 584 struct perf_regs_user regs_user;
585 u64 stack_user_size; 585 u64 stack_user_size;
586 u64 weight; 586 u64 weight;
587 /*
588 * Transaction flags for abort events:
589 */
590 u64 txn;
587}; 591};
588 592
589static inline void perf_sample_data_init(struct perf_sample_data *data, 593static inline void perf_sample_data_init(struct perf_sample_data *data,
@@ -599,6 +603,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
599 data->stack_user_size = 0; 603 data->stack_user_size = 0;
600 data->weight = 0; 604 data->weight = 0;
601 data->data_src.val = 0; 605 data->data_src.val = 0;
606 data->txn = 0;
602} 607}
603 608
604extern void perf_output_sample(struct perf_output_handle *handle, 609extern void perf_output_sample(struct perf_output_handle *handle,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 64ab823f7b74..48a4dc3cb8cf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -559,6 +559,7 @@ static inline int phy_read_status(struct phy_device *phydev) {
559 return phydev->drv->read_status(phydev); 559 return phydev->drv->read_status(phydev);
560} 560}
561 561
562int genphy_setup_forced(struct phy_device *phydev);
562int genphy_restart_aneg(struct phy_device *phydev); 563int genphy_restart_aneg(struct phy_device *phydev);
563int genphy_config_aneg(struct phy_device *phydev); 564int genphy_config_aneg(struct phy_device *phydev);
564int genphy_update_link(struct phy_device *phydev); 565int genphy_update_link(struct phy_device *phydev);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
new file mode 100644
index 000000000000..6d722695e027
--- /dev/null
+++ b/include/linux/phy/phy.h
@@ -0,0 +1,270 @@
1/*
2 * phy.h -- generic phy header file
3 *
4 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __DRIVERS_PHY_H
15#define __DRIVERS_PHY_H
16
17#include <linux/err.h>
18#include <linux/of.h>
19#include <linux/device.h>
20#include <linux/pm_runtime.h>
21
22struct phy;
23
24/**
25 * struct phy_ops - set of function pointers for performing phy operations
26 * @init: operation to be performed for initializing phy
27 * @exit: operation to be performed while exiting
28 * @power_on: powering on the phy
29 * @power_off: powering off the phy
30 * @owner: the module owner containing the ops
31 */
32struct phy_ops {
33 int (*init)(struct phy *phy);
34 int (*exit)(struct phy *phy);
35 int (*power_on)(struct phy *phy);
36 int (*power_off)(struct phy *phy);
37 struct module *owner;
38};
39
40/**
41 * struct phy - represents the phy device
42 * @dev: phy device
43 * @id: id of the phy device
44 * @ops: function pointers for performing phy operations
45 * @init_data: list of PHY consumers (non-dt only)
46 * @mutex: mutex to protect phy_ops
47 * @init_count: used to protect when the PHY is used by multiple consumers
48 * @power_count: used to protect when the PHY is used by multiple consumers
49 */
50struct phy {
51 struct device dev;
52 int id;
53 const struct phy_ops *ops;
54 struct phy_init_data *init_data;
55 struct mutex mutex;
56 int init_count;
57 int power_count;
58};
59
60/**
61 * struct phy_provider - represents the phy provider
62 * @dev: phy provider device
63 * @owner: the module owner having of_xlate
64 * @of_xlate: function pointer to obtain phy instance from phy pointer
65 * @list: to maintain a linked list of PHY providers
66 */
67struct phy_provider {
68 struct device *dev;
69 struct module *owner;
70 struct list_head list;
71 struct phy * (*of_xlate)(struct device *dev,
72 struct of_phandle_args *args);
73};
74
75/**
76 * struct phy_consumer - represents the phy consumer
77 * @dev_name: the device name of the controller that will use this PHY device
78 * @port: name given to the consumer port
79 */
80struct phy_consumer {
81 const char *dev_name;
82 const char *port;
83};
84
85/**
86 * struct phy_init_data - contains the list of PHY consumers
87 * @num_consumers: number of consumers for this PHY device
88 * @consumers: list of PHY consumers
89 */
90struct phy_init_data {
91 unsigned int num_consumers;
92 struct phy_consumer *consumers;
93};
94
95#define PHY_CONSUMER(_dev_name, _port) \
96{ \
97 .dev_name = _dev_name, \
98 .port = _port, \
99}
100
101#define to_phy(dev) (container_of((dev), struct phy, dev))
102
103#define of_phy_provider_register(dev, xlate) \
104 __of_phy_provider_register((dev), THIS_MODULE, (xlate))
105
106#define devm_of_phy_provider_register(dev, xlate) \
107 __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate))
108
109static inline void phy_set_drvdata(struct phy *phy, void *data)
110{
111 dev_set_drvdata(&phy->dev, data);
112}
113
114static inline void *phy_get_drvdata(struct phy *phy)
115{
116 return dev_get_drvdata(&phy->dev);
117}
118
119#if IS_ENABLED(CONFIG_GENERIC_PHY)
120int phy_pm_runtime_get(struct phy *phy);
121int phy_pm_runtime_get_sync(struct phy *phy);
122int phy_pm_runtime_put(struct phy *phy);
123int phy_pm_runtime_put_sync(struct phy *phy);
124void phy_pm_runtime_allow(struct phy *phy);
125void phy_pm_runtime_forbid(struct phy *phy);
126int phy_init(struct phy *phy);
127int phy_exit(struct phy *phy);
128int phy_power_on(struct phy *phy);
129int phy_power_off(struct phy *phy);
130struct phy *phy_get(struct device *dev, const char *string);
131struct phy *devm_phy_get(struct device *dev, const char *string);
132void phy_put(struct phy *phy);
133void devm_phy_put(struct device *dev, struct phy *phy);
134struct phy *of_phy_simple_xlate(struct device *dev,
135 struct of_phandle_args *args);
136struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
137 struct phy_init_data *init_data);
138struct phy *devm_phy_create(struct device *dev,
139 const struct phy_ops *ops, struct phy_init_data *init_data);
140void phy_destroy(struct phy *phy);
141void devm_phy_destroy(struct device *dev, struct phy *phy);
142struct phy_provider *__of_phy_provider_register(struct device *dev,
143 struct module *owner, struct phy * (*of_xlate)(struct device *dev,
144 struct of_phandle_args *args));
145struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
146 struct module *owner, struct phy * (*of_xlate)(struct device *dev,
147 struct of_phandle_args *args));
148void of_phy_provider_unregister(struct phy_provider *phy_provider);
149void devm_of_phy_provider_unregister(struct device *dev,
150 struct phy_provider *phy_provider);
151#else
152static inline int phy_pm_runtime_get(struct phy *phy)
153{
154 return -ENOSYS;
155}
156
157static inline int phy_pm_runtime_get_sync(struct phy *phy)
158{
159 return -ENOSYS;
160}
161
162static inline int phy_pm_runtime_put(struct phy *phy)
163{
164 return -ENOSYS;
165}
166
167static inline int phy_pm_runtime_put_sync(struct phy *phy)
168{
169 return -ENOSYS;
170}
171
172static inline void phy_pm_runtime_allow(struct phy *phy)
173{
174 return;
175}
176
177static inline void phy_pm_runtime_forbid(struct phy *phy)
178{
179 return;
180}
181
182static inline int phy_init(struct phy *phy)
183{
184 return -ENOSYS;
185}
186
187static inline int phy_exit(struct phy *phy)
188{
189 return -ENOSYS;
190}
191
192static inline int phy_power_on(struct phy *phy)
193{
194 return -ENOSYS;
195}
196
197static inline int phy_power_off(struct phy *phy)
198{
199 return -ENOSYS;
200}
201
202static inline struct phy *phy_get(struct device *dev, const char *string)
203{
204 return ERR_PTR(-ENOSYS);
205}
206
207static inline struct phy *devm_phy_get(struct device *dev, const char *string)
208{
209 return ERR_PTR(-ENOSYS);
210}
211
212static inline void phy_put(struct phy *phy)
213{
214}
215
216static inline void devm_phy_put(struct device *dev, struct phy *phy)
217{
218}
219
220static inline struct phy *of_phy_simple_xlate(struct device *dev,
221 struct of_phandle_args *args)
222{
223 return ERR_PTR(-ENOSYS);
224}
225
226static inline struct phy *phy_create(struct device *dev,
227 const struct phy_ops *ops, struct phy_init_data *init_data)
228{
229 return ERR_PTR(-ENOSYS);
230}
231
232static inline struct phy *devm_phy_create(struct device *dev,
233 const struct phy_ops *ops, struct phy_init_data *init_data)
234{
235 return ERR_PTR(-ENOSYS);
236}
237
238static inline void phy_destroy(struct phy *phy)
239{
240}
241
242static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
243{
244}
245
246static inline struct phy_provider *__of_phy_provider_register(
247 struct device *dev, struct module *owner, struct phy * (*of_xlate)(
248 struct device *dev, struct of_phandle_args *args))
249{
250 return ERR_PTR(-ENOSYS);
251}
252
253static inline struct phy_provider *__devm_of_phy_provider_register(struct device
254 *dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev,
255 struct of_phandle_args *args))
256{
257 return ERR_PTR(-ENOSYS);
258}
259
260static inline void of_phy_provider_unregister(struct phy_provider *phy_provider)
261{
262}
263
264static inline void devm_of_phy_provider_unregister(struct device *dev,
265 struct phy_provider *phy_provider)
266{
267}
268#endif
269
270#endif /* __DRIVERS_PHY_H */
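Consumer-side sketch of the new generic PHY framework declared above: obtain a PHY by consumer string, initialise it and power it on. The "usb-phy" name and the unwind order are assumptions for illustration.

#include <linux/phy/phy.h>

static int example_enable_phy(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "usb-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* -ENOSYS when CONFIG_GENERIC_PHY is off */

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);

	return ret;
}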
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index e2772666f004..7246ef3d4455 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -23,6 +23,7 @@ struct bsd_acct_struct;
23struct pid_namespace { 23struct pid_namespace {
24 struct kref kref; 24 struct kref kref;
25 struct pidmap pidmap[PIDMAP_ENTRIES]; 25 struct pidmap pidmap[PIDMAP_ENTRIES];
26 struct rcu_head rcu;
26 int last_pid; 27 int last_pid;
27 unsigned int nr_hashed; 28 unsigned int nr_hashed;
28 struct task_struct *child_reaper; 29 struct task_struct *child_reaper;
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 5979147d2bda..fefb88663975 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -144,6 +144,9 @@ extern struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname,
144extern struct pinctrl_gpio_range * 144extern struct pinctrl_gpio_range *
145pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev, 145pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
146 unsigned int pin); 146 unsigned int pin);
147extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
148 const char *pin_group, const unsigned **pins,
149 unsigned *num_pins);
147 150
148#ifdef CONFIG_OF 151#ifdef CONFIG_OF
149extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np); 152extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
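A hedged sketch of the newly exported pinctrl_get_group_pins(): resolving a named group into its pin numbers, for example from a GPIO driver layered on a pin controller. The group name is a placeholder.

#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>

static int example_dump_group(struct pinctrl_dev *pctldev)
{
	const unsigned *pins;
	unsigned int i, num_pins;
	int ret;

	ret = pinctrl_get_group_pins(pctldev, "i2c0_grp", &pins, &num_pins);
	if (ret)
		return ret;

	for (i = 0; i < num_pins; i++)
		pr_info("i2c0_grp pin %u\n", pins[i]);

	return 0;
}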
diff --git a/include/linux/i2c/at24.h b/include/linux/platform_data/at24.h
index 285025a9cdc9..c42aa89d34ee 100644
--- a/include/linux/i2c/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -28,7 +28,7 @@
28 * 28 *
29 * void get_mac_addr(struct memory_accessor *mem_acc, void *context) 29 * void get_mac_addr(struct memory_accessor *mem_acc, void *context)
30 * { 30 * {
31 * u8 *mac_addr = ethernet_pdata->mac_addr; 31 * u8 *mac_addr = ethernet_pdata->mac_addr;
32 * off_t offset = context; 32 * off_t offset = context;
33 * 33 *
34 * // Read MAC addr from EEPROM 34 * // Read MAC addr from EEPROM
diff --git a/include/linux/platform_data/clk-nomadik.h b/include/linux/platform_data/clk-nomadik.h
deleted file mode 100644
index 5713c87b2477..000000000000
--- a/include/linux/platform_data/clk-nomadik.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* Minimal platform data header */
2void nomadik_clk_init(void);
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 9d98f3aaa16c..97baf831e071 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,6 +10,9 @@
10#ifndef __CLK_UX500_H 10#ifndef __CLK_UX500_H
11#define __CLK_UX500_H 11#define __CLK_UX500_H
12 12
13void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
14 u32 clkrst5_base, u32 clkrst6_base);
15
13void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, 16void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
14 u32 clkrst5_base, u32 clkrst6_base); 17 u32 clkrst5_base, u32 clkrst6_base);
15void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, 18void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 8db5ae03b6e3..689a856b86f9 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -84,6 +84,8 @@ struct snd_platform_data {
84 u8 version; 84 u8 version;
85 u8 txnumevt; 85 u8 txnumevt;
86 u8 rxnumevt; 86 u8 rxnumevt;
87 int tx_dma_channel;
88 int rx_dma_channel;
87}; 89};
88 90
89enum { 91enum {
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
new file mode 100644
index 000000000000..89ba1b0c90e4
--- /dev/null
+++ b/include/linux/platform_data/dma-s3c24xx.h
@@ -0,0 +1,46 @@
1/*
2 * S3C24XX DMA handling
3 *
4 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 */
11
12/* Helper to encode the source selection constraints for early s3c socs. */
13#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4)
14
15enum s3c24xx_dma_bus {
16 S3C24XX_DMA_APB,
17 S3C24XX_DMA_AHB,
18};
19
20/**
21 * @bus: on which bus does the peripheral reside - AHB or APB.
22 * @handshake: is a handshake with the peripheral necessary
23 * @chansel: channel selection information, depending on variant; reqsel for
24 * s3c2443 and later and channel-selection map for earlier SoCs
25 * see CHANSEL doc in s3c2443-dma.c
26 */
27struct s3c24xx_dma_channel {
28 enum s3c24xx_dma_bus bus;
29 bool handshake;
30 u16 chansel;
31};
32
33/**
34 * struct s3c24xx_dma_platdata - platform specific settings
35 * @num_phy_channels: number of physical channels
36 * @channels: array of virtual channel descriptions
37 * @num_channels: number of virtual channels
38 */
39struct s3c24xx_dma_platdata {
40 int num_phy_channels;
41 struct s3c24xx_dma_channel *channels;
42 int num_channels;
43};
44
45struct dma_chan;
46bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);
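
The header above only declares the data types; a minimal board-code sketch of how they might be filled in follows. The channel index, request source and physical channel count are placeholders, not values taken from this patch.

#include <linux/kernel.h>
#include <linux/platform_data/dma-s3c24xx.h>

/* Hypothetical map: one APB peripheral with handshaking on virtual channel 0. */
static struct s3c24xx_dma_channel example_dma_channels[] = {
	[0] = {
		.bus = S3C24XX_DMA_APB,
		.handshake = true,
		.chansel = S3C24XX_DMA_CHANREQ(3, 0),	/* request source 3 on hw channel 0 */
	},
};

static struct s3c24xx_dma_platdata example_dma_platdata = {
	.num_phy_channels = 4,
	.channels = example_dma_channels,
	.num_channels = ARRAY_SIZE(example_dma_channels),
};
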
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 179fb91bb5f2..f50821cb64be 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -67,10 +67,10 @@ struct edmacc_param {
67#define ITCCHEN BIT(23) 67#define ITCCHEN BIT(23)
68 68
69/*ch_status parameter of callback function possible values*/ 69/*ch_status parameter of callback function possible values*/
70#define DMA_COMPLETE 1 70#define EDMA_DMA_COMPLETE 1
71#define DMA_CC_ERROR 2 71#define EDMA_DMA_CC_ERROR 2
72#define DMA_TC1_ERROR 3 72#define EDMA_DMA_TC1_ERROR 3
73#define DMA_TC2_ERROR 4 73#define EDMA_DMA_TC2_ERROR 4
74 74
75enum address_mode { 75enum address_mode {
76 INCR = 0, 76 INCR = 0,
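
The DMA_* values above were renamed to EDMA_DMA_* so they no longer collide with the generic dmaengine DMA_COMPLETE. A sketch of an EDMA completion callback using the new names follows; the three-argument callback signature is assumed from the DaVinci EDMA layer of this era and is not part of this hunk.

#include <linux/platform_data/edma.h>
#include <linux/types.h>

/* Sketch: channel-status dispatch with the renamed constants. */
static void example_edma_callback(unsigned int channel, u16 ch_status, void *data)
{
	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		/* transfer finished normally */
		break;
	case EDMA_DMA_CC_ERROR:
	case EDMA_DMA_TC1_ERROR:
	case EDMA_DMA_TC2_ERROR:
		/* log and recover from the error */
		break;
	}
}
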
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
new file mode 100644
index 000000000000..6efd20264585
--- /dev/null
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -0,0 +1,60 @@
1/*
2 * DaVinci GPIO Platform Related Defines
3 *
4 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DAVINCI_GPIO_PLATFORM_H
17#define __DAVINCI_GPIO_PLATFORM_H
18
19#include <linux/io.h>
20#include <linux/spinlock.h>
21
22#include <asm-generic/gpio.h>
23
24enum davinci_gpio_type {
25 GPIO_TYPE_TNETV107X = 0,
26};
27
28struct davinci_gpio_platform_data {
29 u32 ngpio;
30 u32 gpio_unbanked;
31 u32 intc_irq_num;
32};
33
34
35struct davinci_gpio_controller {
36 struct gpio_chip chip;
37 int irq_base;
38 /* Serialize access to GPIO registers */
39 spinlock_t lock;
40 void __iomem *regs;
41 void __iomem *set_data;
42 void __iomem *clr_data;
43 void __iomem *in_data;
44 int gpio_unbanked;
45 unsigned gpio_irq;
46};
47
48/*
49 * basic gpio routines
50 */
51#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
52
53/* Convert GPIO signal to GPIO pin number */
54#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
55
56static inline u32 __gpio_mask(unsigned gpio)
57{
58 return 1 << (gpio % 32);
59}
60#endif
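
A short sketch of the helpers this header adds: GPIO_TO_PIN() flattens a (bank, offset) pair into a global GPIO number at 16 GPIOs per bank, and __gpio_mask() yields the bit for that GPIO inside a 32-bit bank register. The controller pointer is assumed to refer to the bank that holds the GPIO.

#include <linux/io.h>
#include <linux/platform_data/gpio-davinci.h>

/* Sketch: drive bank 2, offset 3 high through the banked set_data register. */
static void example_set_gpio(struct davinci_gpio_controller *ctrl)
{
	unsigned gpio = GPIO_TO_PIN(2, 3);	/* global GPIO number 35 */

	writel(__gpio_mask(gpio), ctrl->set_data);
}
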
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 51a2ff579d60..624ff9edad6f 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -22,6 +22,7 @@
22 22
23struct lp55xx_led_config { 23struct lp55xx_led_config {
24 const char *name; 24 const char *name;
25 const char *default_trigger;
25 u8 chan_nr; 26 u8 chan_nr;
26 u8 led_current; /* mA x10, 0 if led is not connected */ 27 u8 led_current; /* mA x10, 0 if led is not connected */
27 u8 max_current; 28 u8 max_current;
@@ -66,10 +67,8 @@ struct lp55xx_platform_data {
66 /* Clock configuration */ 67 /* Clock configuration */
67 u8 clock_mode; 68 u8 clock_mode;
68 69
69 /* Platform specific functions */ 70 /* optional enable GPIO */
70 int (*setup_resources)(void); 71 int enable_gpio;
71 void (*release_resources)(void);
72 void (*enable)(bool state);
73 72
74 /* Predefined pattern data */ 73 /* Predefined pattern data */
75 struct lp55xx_predef_pattern *patterns; 74 struct lp55xx_predef_pattern *patterns;
diff --git a/include/linux/platform_data/leds-pca9685.h b/include/linux/platform_data/leds-pca9685.h
new file mode 100644
index 000000000000..778e9e4249cc
--- /dev/null
+++ b/include/linux/platform_data/leds-pca9685.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2013 Maximilian Güntner <maximilian.guentner@gmail.com>
3 *
4 * This file is subject to the terms and conditions of version 2 of
5 * the GNU General Public License. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Based on leds-pca963x.h by Peter Meerwald <p.meerwald@bct-electronic.com>
9 *
10 * LED driver for the NXP PCA9685 PWM chip
11 *
12 */
13
14#ifndef __LINUX_PCA9685_H
15#define __LINUX_PCA9685_H
16
17#include <linux/leds.h>
18
19enum pca9685_outdrv {
20 PCA9685_OPEN_DRAIN,
21 PCA9685_TOTEM_POLE,
22};
23
24enum pca9685_inverted {
25 PCA9685_NOT_INVERTED,
26 PCA9685_INVERTED,
27};
28
29struct pca9685_platform_data {
30 struct led_platform_data leds;
31 enum pca9685_outdrv outdrv;
32 enum pca9685_inverted inverted;
33};
34
35#endif /* __LINUX_PCA9685_H */
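
An illustrative board definition for the new PCA9685 platform data; the LED names and trigger are invented, and struct led_info/led_platform_data come from <linux/leds.h>, which the header already pulls in.

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_data/leds-pca9685.h>

static struct led_info example_pca9685_leds[] = {
	{ .name = "front:green:status", .default_trigger = "heartbeat" },
	{ .name = "front:red:fault" },
};

static struct pca9685_platform_data example_pca9685_pdata = {
	.leds = {
		.num_leds = ARRAY_SIZE(example_pca9685_leds),
		.leds = example_pca9685_leds,
	},
	.outdrv = PCA9685_TOTEM_POLE,
	.inverted = PCA9685_NOT_INVERTED,
};
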
diff --git a/include/linux/platform_data/lm3630_bl.h b/include/linux/platform_data/lm3630_bl.h
deleted file mode 100644
index 9176dd3f2d63..000000000000
--- a/include/linux/platform_data/lm3630_bl.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2* Simple driver for Texas Instruments LM3630 LED Flash driver chip
3* Copyright (C) 2012 Texas Instruments
4*
5* This program is free software; you can redistribute it and/or modify
6* it under the terms of the GNU General Public License version 2 as
7* published by the Free Software Foundation.
8*
9*/
10
11#ifndef __LINUX_LM3630_H
12#define __LINUX_LM3630_H
13
14#define LM3630_NAME "lm3630_bl"
15
16enum lm3630_pwm_ctrl {
17 PWM_CTRL_DISABLE = 0,
18 PWM_CTRL_BANK_A,
19 PWM_CTRL_BANK_B,
20 PWM_CTRL_BANK_ALL,
21};
22
23enum lm3630_pwm_active {
24 PWM_ACTIVE_HIGH = 0,
25 PWM_ACTIVE_LOW,
26};
27
28enum lm3630_bank_a_ctrl {
29 BANK_A_CTRL_DISABLE = 0x0,
30 BANK_A_CTRL_LED1 = 0x4,
31 BANK_A_CTRL_LED2 = 0x1,
32 BANK_A_CTRL_ALL = 0x5,
33};
34
35enum lm3630_bank_b_ctrl {
36 BANK_B_CTRL_DISABLE = 0,
37 BANK_B_CTRL_LED2,
38};
39
40struct lm3630_platform_data {
41
42 /* maximum brightness */
43 int max_brt_led1;
44 int max_brt_led2;
45
46 /* initial on brightness */
47 int init_brt_led1;
48 int init_brt_led2;
49 enum lm3630_pwm_ctrl pwm_ctrl;
50 enum lm3630_pwm_active pwm_active;
51 enum lm3630_bank_a_ctrl bank_a_ctrl;
52 enum lm3630_bank_b_ctrl bank_b_ctrl;
53 unsigned int pwm_period;
54 void (*pwm_set_intensity) (int brightness, int max_brightness);
55};
56
57#endif /* __LINUX_LM3630_H */
diff --git a/include/linux/platform_data/lm3630a_bl.h b/include/linux/platform_data/lm3630a_bl.h
new file mode 100644
index 000000000000..7538e38e270b
--- /dev/null
+++ b/include/linux/platform_data/lm3630a_bl.h
@@ -0,0 +1,65 @@
1/*
2* Simple driver for Texas Instruments LM3630A LED Flash driver chip
3* Copyright (C) 2012 Texas Instruments
4*
5* This program is free software; you can redistribute it and/or modify
6* it under the terms of the GNU General Public License version 2 as
7* published by the Free Software Foundation.
8*
9*/
10
11#ifndef __LINUX_LM3630A_H
12#define __LINUX_LM3630A_H
13
14#define LM3630A_NAME "lm3630a_bl"
15
16enum lm3630a_pwm_ctrl {
17 LM3630A_PWM_DISABLE = 0x00,
18 LM3630A_PWM_BANK_A,
19 LM3630A_PWM_BANK_B,
20 LM3630A_PWM_BANK_ALL,
21 LM3630A_PWM_BANK_A_ACT_LOW = 0x05,
22 LM3630A_PWM_BANK_B_ACT_LOW,
23 LM3630A_PWM_BANK_ALL_ACT_LOW,
24};
25
26enum lm3630a_leda_ctrl {
27 LM3630A_LEDA_DISABLE = 0x00,
28 LM3630A_LEDA_ENABLE = 0x04,
29 LM3630A_LEDA_ENABLE_LINEAR = 0x14,
30};
31
32enum lm3630a_ledb_ctrl {
33 LM3630A_LEDB_DISABLE = 0x00,
34 LM3630A_LEDB_ON_A = 0x01,
35 LM3630A_LEDB_ENABLE = 0x02,
36 LM3630A_LEDB_ENABLE_LINEAR = 0x0A,
37};
38
39#define LM3630A_MAX_BRIGHTNESS 255
40/*
41 *@leda_init_brt : led a init brightness. 4~255
42 *@leda_max_brt : led a max brightness. 4~255
43 *@leda_ctrl : led a disable, enable linear, enable exponential
44 *@ledb_init_brt : led b init brightness. 4~255
45 *@ledb_max_brt : led b max brightness. 4~255
46 *@ledb_ctrl : led b disable, enable linear, enable exponential
47 *@pwm_period : pwm period
48 *@pwm_ctrl : pwm disable, bank a or b, active high or low
49 */
50struct lm3630a_platform_data {
51
52 /* led a config. */
53 int leda_init_brt;
54 int leda_max_brt;
55 enum lm3630a_leda_ctrl leda_ctrl;
56 /* led b config. */
57 int ledb_init_brt;
58 int ledb_max_brt;
59 enum lm3630a_ledb_ctrl ledb_ctrl;
60 /* pwm config. */
61 unsigned int pwm_period;
62 enum lm3630a_pwm_ctrl pwm_ctrl;
63};
64
65#endif /* __LINUX_LM3630A_H */
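
A minimal sketch of the replacement lm3630a platform data, using only the fields and enums declared above; the brightness and PWM numbers are arbitrary.

#include <linux/platform_data/lm3630a_bl.h>

static struct lm3630a_platform_data example_lm3630a_pdata = {
	/* LED A: linear mapping, full range */
	.leda_init_brt = 100,
	.leda_max_brt = LM3630A_MAX_BRIGHTNESS,
	.leda_ctrl = LM3630A_LEDA_ENABLE_LINEAR,
	/* LED B: banked together with LED A */
	.ledb_init_brt = 100,
	.ledb_max_brt = LM3630A_MAX_BRIGHTNESS,
	.ledb_ctrl = LM3630A_LEDB_ON_A,
	/* PWM input drives bank A */
	.pwm_period = 255,
	.pwm_ctrl = LM3630A_PWM_BANK_A,
};
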
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index ea3200527dd3..1b2ba24e4e03 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -40,6 +40,17 @@
40#define LP8553_PWM_CONFIG LP8550_PWM_CONFIG 40#define LP8553_PWM_CONFIG LP8550_PWM_CONFIG
41#define LP8553_I2C_CONFIG LP8550_I2C_CONFIG 41#define LP8553_I2C_CONFIG LP8550_I2C_CONFIG
42 42
43/* CONFIG register - LP8555 */
44#define LP8555_PWM_STANDBY BIT(7)
45#define LP8555_PWM_FILTER BIT(6)
46#define LP8555_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset
47 when the backlight turns on */
48#define LP8555_OFF_OPENLEDS BIT(2)
49#define LP8555_PWM_CONFIG LP8555_PWM_ONLY
50#define LP8555_I2C_CONFIG LP8555_I2C_ONLY
51#define LP8555_COMB1_CONFIG LP8555_COMBINED1
52#define LP8555_COMB2_CONFIG LP8555_COMBINED2
53
43/* DEVICE CONTROL register - LP8556 */ 54/* DEVICE CONTROL register - LP8556 */
44#define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT) 55#define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT)
45#define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT) 56#define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT)
@@ -65,6 +76,7 @@ enum lp855x_chip_id {
65 LP8551, 76 LP8551,
66 LP8552, 77 LP8552,
67 LP8553, 78 LP8553,
79 LP8555,
68 LP8556, 80 LP8556,
69 LP8557, 81 LP8557,
70}; 82};
@@ -89,6 +101,13 @@ enum lp8553_brighntess_source {
89 LP8553_I2C_ONLY = LP8550_I2C_ONLY, 101 LP8553_I2C_ONLY = LP8550_I2C_ONLY,
90}; 102};
91 103
104enum lp8555_brightness_source {
105 LP8555_PWM_ONLY,
106 LP8555_I2C_ONLY,
107 LP8555_COMBINED1, /* Brightness register with shaped PWM */
108 LP8555_COMBINED2, /* PWM with shaped brightness register */
109};
110
92enum lp8556_brightness_source { 111enum lp8556_brightness_source {
93 LP8556_PWM_ONLY, 112 LP8556_PWM_ONLY,
94 LP8556_COMBINED1, /* pwm + i2c before the shaper block */ 113 LP8556_COMBINED1, /* pwm + i2c before the shaper block */
diff --git a/include/linux/platform_data/mipi-csis.h b/include/linux/platform_data/mipi-csis.h
index bf34e17cee7f..c2fd9024717c 100644
--- a/include/linux/platform_data/mipi-csis.h
+++ b/include/linux/platform_data/mipi-csis.h
@@ -25,13 +25,4 @@ struct s5p_platform_mipi_csis {
25 u8 hs_settle; 25 u8 hs_settle;
26}; 26};
27 27
28/**
29 * s5p_csis_phy_enable - global MIPI-CSI receiver D-PHY control
30 * @id: MIPI-CSIS hardware instance index (0...1)
31 * @on: true to enable D-PHY and deassert its reset
32 * false to disable D-PHY
33 * @return: 0 on success, or negative error code on failure
34 */
35int s5p_csis_phy_enable(int id, bool on);
36
37#endif /* __PLAT_SAMSUNG_MIPI_CSIS_H_ */ 28#endif /* __PLAT_SAMSUNG_MIPI_CSIS_H_ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index d44912d81578..75f70f6ac137 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -10,6 +10,8 @@
10#ifndef __ASM_ARCH_IMX_ESDHC_H 10#ifndef __ASM_ARCH_IMX_ESDHC_H
11#define __ASM_ARCH_IMX_ESDHC_H 11#define __ASM_ARCH_IMX_ESDHC_H
12 12
13#include <linux/types.h>
14
13enum wp_types { 15enum wp_types {
14 ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ 16 ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
15 ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ 17 ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -32,6 +34,7 @@ enum cd_types {
32 * @cd_gpio: gpio for card_detect interrupt 34 * @cd_gpio: gpio for card_detect interrupt
33 * @wp_type: type of write_protect method (see wp_types enum above) 35 * @wp_type: type of write_protect method (see wp_types enum above)
34 * @cd_type: type of card_detect method (see cd_types enum above) 36 * @cd_type: type of card_detect method (see cd_types enum above)
37 * @support_vsel: indicate it supports 1.8v switching
35 */ 38 */
36 39
37struct esdhc_platform_data { 40struct esdhc_platform_data {
@@ -41,5 +44,7 @@ struct esdhc_platform_data {
41 enum cd_types cd_type; 44 enum cd_types cd_type;
42 int max_bus_width; 45 int max_bus_width;
43 unsigned int f_max; 46 unsigned int f_max;
47 bool support_vsel;
48 unsigned int delay_line;
44}; 49};
45#endif /* __ASM_ARCH_IMX_ESDHC_H */ 50#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 6bf9ef43ddb1..4da5bfa2147f 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -23,13 +23,16 @@ enum nand_io {
23}; 23};
24 24
25enum omap_ecc { 25enum omap_ecc {
26 /* 1-bit ecc: stored at end of spare area */ 26 /* 1-bit ECC calculation by GPMC, Error detection by Software */
27 OMAP_ECC_HAMMING_CODE_DEFAULT = 0, /* Default, s/w method */ 27 OMAP_ECC_HAM1_CODE_HW = 0,
28 OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */ 28 /* 4-bit ECC calculation by GPMC, Error detection by Software */
29 /* 1-bit ecc: stored at beginning of spare area as romcode */ 29 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW,
30 OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */ 30 /* 4-bit ECC calculation by GPMC, Error detection by ELM */
31 OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */ 31 OMAP_ECC_BCH4_CODE_HW,
32 OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */ 32 /* 8-bit ECC calculation by GPMC, Error detection by Software */
33 OMAP_ECC_BCH8_CODE_HW_DETECTION_SW,
34 /* 8-bit ECC calculation by GPMC, Error detection by ELM */
35 OMAP_ECC_BCH8_CODE_HW,
33}; 36};
34 37
35struct gpmc_nand_regs { 38struct gpmc_nand_regs {
@@ -63,5 +66,6 @@ struct omap_nand_platform_data {
63 66
64 /* for passing the partitions */ 67 /* for passing the partitions */
65 struct device_node *of_node; 68 struct device_node *of_node;
69 struct device_node *elm_of_node;
66}; 70};
67#endif 71#endif
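
With the reworked omap_ecc enum a board would now pick one of the explicit schemes and, for the ELM-backed ones, also hand over the ELM device-tree node. The ecc_opt field name and the compatible string below are assumptions about the surrounding omap_nand_platform_data usage; they are not shown in this hunk.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_data/mtd-nand-omap2.h>

static struct omap_nand_platform_data example_nand_pdata;

/* Sketch: request 8-bit BCH with error detection done in the ELM. */
static void __init example_nand_init(void)
{
	example_nand_pdata.ecc_opt = OMAP_ECC_BCH8_CODE_HW;	/* assumed field name */
	example_nand_pdata.elm_of_node =
		of_find_compatible_node(NULL, NULL, "ti,am3352-elm");
}
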
diff --git a/include/linux/platform_data/pinctrl-adi2.h b/include/linux/platform_data/pinctrl-adi2.h
new file mode 100644
index 000000000000..8f91300617ec
--- /dev/null
+++ b/include/linux/platform_data/pinctrl-adi2.h
@@ -0,0 +1,40 @@
1/*
2 * Pinctrl Driver for ADI GPIO2 controller
3 *
4 * Copyright 2007-2013 Analog Devices Inc.
5 *
6 * Licensed under the GPLv2 or later
7 */
8
9
10#ifndef PINCTRL_ADI2_H
11#define PINCTRL_ADI2_H
12
13#include <linux/io.h>
14#include <linux/platform_device.h>
15
16/**
17 * struct adi_pinctrl_gpio_platform_data - Pinctrl gpio platform data
18 * for ADI GPIO2 device.
19 *
20 * @port_gpio_base: Optional global GPIO index of the GPIO bank.
21 * 0 means driver decides.
22 * @port_pin_base: Pin index of the pin controller device.
23 * @port_width: Number of pins in the GPIO bank device
24 * @pint_id: GPIO PINT device id that this GPIO bank should map to.
25 * @pint_assign: The 32-bit GPIO PINT registers can be divided into 2 parts. A
26 * GPIO bank can be mapped into either low 16 bits[0] or high 16
27 * bits[1] of each PINT register.
28 * @pint_map: GPIO bank mapping code in the PINT device
29 */
30struct adi_pinctrl_gpio_platform_data {
31 unsigned int port_gpio_base;
32 unsigned int port_pin_base;
33 unsigned int port_width;
34 u8 pinctrl_id;
35 u8 pint_id;
36 bool pint_assign;
37 u8 pint_map;
38};
39
40#endif
diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h
new file mode 100644
index 000000000000..72eacda9b360
--- /dev/null
+++ b/include/linux/platform_data/pinctrl-single.h
@@ -0,0 +1,12 @@
1/**
2 * irq: optional wake-up interrupt
3 * rearm: optional soc specific rearm function
4 *
5 * Note that the irq and rearm setup should come from device
6 * tree except for omap where there are still some dependencies
7 * to the legacy PRM code.
8 */
9struct pcs_pdata {
10 int irq;
11 void (*rearm)(void);
12};
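
A sketch of how legacy OMAP code might hand the optional wake-up interrupt and re-arm hook to pinctrl-single through this structure; the interrupt number and the empty rearm body are placeholders.

#include <linux/platform_data/pinctrl-single.h>

static void example_pcs_rearm(void)
{
	/* SoC-specific: re-arm the I/O wake-up logic in the PRM here. */
}

static struct pcs_pdata example_pcs_pdata = {
	.irq = 7,			/* placeholder wake-up interrupt */
	.rearm = example_pcs_rearm,
};
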
diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
deleted file mode 100644
index 5f28cae18582..000000000000
--- a/include/linux/platform_data/usb-ehci-s5p.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __PLAT_SAMSUNG_EHCI_H
12#define __PLAT_SAMSUNG_EHCI_H __FILE__
13
14struct s5p_ehci_platdata {
15 int (*phy_init)(struct platform_device *pdev, int type);
16 int (*phy_exit)(struct platform_device *pdev, int type);
17};
18
19extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
20
21#endif /* __PLAT_SAMSUNG_EHCI_H */
diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
deleted file mode 100644
index c256c595be5e..000000000000
--- a/include/linux/platform_data/usb-ohci-exynos.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * http://www.samsung.com/
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MACH_EXYNOS_OHCI_H
12#define __MACH_EXYNOS_OHCI_H
13
14struct exynos4_ohci_platdata {
15 int (*phy_init)(struct platform_device *pdev, int type);
16 int (*phy_exit)(struct platform_device *pdev, int type);
17};
18
19extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
20
21#endif /* __MACH_EXYNOS_OHCI_H */
diff --git a/include/linux/platform_data/usb-rcar-gen2-phy.h b/include/linux/platform_data/usb-rcar-gen2-phy.h
new file mode 100644
index 000000000000..dd3ba46c0d90
--- /dev/null
+++ b/include/linux/platform_data/usb-rcar-gen2-phy.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2013 Renesas Solutions Corp.
3 * Copyright (C) 2013 Cogent Embedded, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __USB_RCAR_GEN2_PHY_H
11#define __USB_RCAR_GEN2_PHY_H
12
13#include <linux/types.h>
14
15struct rcar_gen2_phy_platform_data {
16 /* USB channel 0 configuration */
17 bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */
18 /* USB channel 2 configuration */
19 bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */
20};
21
22#endif
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
new file mode 100644
index 000000000000..0472ab2f6ede
--- /dev/null
+++ b/include/linux/platform_data/zforce_ts.h
@@ -0,0 +1,26 @@
1/* drivers/input/touchscreen/zforce.c
2 *
3 * Copyright (C) 2012-2013 MundoReader S.L.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _LINUX_INPUT_ZFORCE_TS_H
16#define _LINUX_INPUT_ZFORCE_TS_H
17
18struct zforce_ts_platdata {
19 int gpio_int;
20 int gpio_rst;
21
22 unsigned int x_max;
23 unsigned int y_max;
24};
25
26#endif /* _LINUX_INPUT_ZFORCE_TS_H */
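
Board code would describe the panel to the new zforce driver roughly as below; the GPIO numbers and resolution are invented for illustration.

#include <linux/platform_data/zforce_ts.h>

static struct zforce_ts_platdata example_zforce_pdata = {
	.gpio_int = 52,		/* placeholder interrupt GPIO */
	.gpio_rst = 53,		/* placeholder reset GPIO */
	.x_max = 1024,
	.y_max = 768,
};
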
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index ce8e4ffd78c7..16f6654082dd 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -178,6 +178,7 @@ struct platform_driver {
178 int (*resume)(struct platform_device *); 178 int (*resume)(struct platform_device *);
179 struct device_driver driver; 179 struct device_driver driver;
180 const struct platform_device_id *id_table; 180 const struct platform_device_id *id_table;
181 bool prevent_deferred_probe;
181}; 182};
182 183
183#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ 184#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
new file mode 100644
index 000000000000..5151b0059585
--- /dev/null
+++ b/include/linux/pm_opp.h
@@ -0,0 +1,139 @@
1/*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef __LINUX_OPP_H__
15#define __LINUX_OPP_H__
16
17#include <linux/err.h>
18#include <linux/cpufreq.h>
19#include <linux/notifier.h>
20
21struct dev_pm_opp;
22struct device;
23
24enum dev_pm_opp_event {
25 OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
26};
27
28#if defined(CONFIG_PM_OPP)
29
30unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
31
32unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
33
34int dev_pm_opp_get_opp_count(struct device *dev);
35
36struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
37 unsigned long freq,
38 bool available);
39
40struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
41 unsigned long *freq);
42
43struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
44 unsigned long *freq);
45
46int dev_pm_opp_add(struct device *dev, unsigned long freq,
47 unsigned long u_volt);
48
49int dev_pm_opp_enable(struct device *dev, unsigned long freq);
50
51int dev_pm_opp_disable(struct device *dev, unsigned long freq);
52
53struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
54#else
55static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
56{
57 return 0;
58}
59
60static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
61{
62 return 0;
63}
64
65static inline int dev_pm_opp_get_opp_count(struct device *dev)
66{
67 return 0;
68}
69
70static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
71 unsigned long freq, bool available)
72{
73 return ERR_PTR(-EINVAL);
74}
75
76static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
77 unsigned long *freq)
78{
79 return ERR_PTR(-EINVAL);
80}
81
82static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
83 unsigned long *freq)
84{
85 return ERR_PTR(-EINVAL);
86}
87
88static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
89 unsigned long u_volt)
90{
91 return -EINVAL;
92}
93
94static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
95{
96 return 0;
97}
98
99static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
100{
101 return 0;
102}
103
104static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
105 struct device *dev)
106{
107 return ERR_PTR(-EINVAL);
108}
109#endif /* CONFIG_PM_OPP */
110
111#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
112int of_init_opp_table(struct device *dev);
113#else
114static inline int of_init_opp_table(struct device *dev)
115{
116 return -EINVAL;
117}
118#endif
119
120#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
121int dev_pm_opp_init_cpufreq_table(struct device *dev,
122 struct cpufreq_frequency_table **table);
123void dev_pm_opp_free_cpufreq_table(struct device *dev,
124 struct cpufreq_frequency_table **table);
125#else
126static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
127 struct cpufreq_frequency_table **table)
128{
129 return -EINVAL;
130}
131
132static inline
133void dev_pm_opp_free_cpufreq_table(struct device *dev,
134 struct cpufreq_frequency_table **table)
135{
136}
137#endif /* CONFIG_CPU_FREQ */
138
139#endif /* __LINUX_OPP_H__ */
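
A hedged sketch of the renamed OPP API from a driver's point of view: register operating points at probe time, then look up the ceiling OPP for a requested rate. In this kernel the returned struct dev_pm_opp is RCU-protected, so the lookup is wrapped in rcu_read_lock(); treat that locking convention, and the example frequencies and voltages, as assumptions rather than something stated in this header.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int example_opp_setup(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 800000000;	/* requested rate in Hz */
	unsigned long volt;

	/* Register two operating points: frequency in Hz, voltage in uV. */
	dev_pm_opp_add(dev, 600000000, 1100000);
	dev_pm_opp_add(dev, 800000000, 1200000);

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	dev_info(dev, "selected %lu Hz at %lu uV\n", freq, volt);
	return 0;
}
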
diff --git a/include/linux/irqchip/bcm2835.h b/include/linux/power/bq24735-charger.h
index 48a859bc9dca..f536164a6069 100644
--- a/include/linux/irqchip/bcm2835.h
+++ b/include/linux/power/bq24735-charger.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * Copyright (C) 2010 Broadcom
3 * 2 *
4 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 4 * it under the terms of the GNU General Public License as published by
@@ -16,14 +15,25 @@
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 16 */
18 17
19#ifndef __LINUX_IRQCHIP_BCM2835_H_ 18#ifndef __CHARGER_BQ24735_H_
20#define __LINUX_IRQCHIP_BCM2835_H_ 19#define __CHARGER_BQ24735_H_
21 20
22#include <asm/exception.h> 21#include <linux/types.h>
22#include <linux/power_supply.h>
23 23
24extern void bcm2835_init_irq(void); 24struct bq24735_platform {
25 uint32_t charge_current;
26 uint32_t charge_voltage;
27 uint32_t input_current;
25 28
26extern asmlinkage void __exception_irq_entry bcm2835_handle_irq( 29 const char *name;
27 struct pt_regs *regs);
28 30
29#endif 31 int status_gpio;
32 int status_gpio_active_low;
33 bool status_gpio_valid;
34
35 char **supplied_to;
36 size_t num_supplicants;
37};
38
39#endif /* __CHARGER_BQ24735_H_ */
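
An illustrative bq24735 platform description; the current/voltage limits and the status GPIO are placeholders, and supplied_to simply names the battery this charger feeds.

#include <linux/kernel.h>
#include <linux/power/bq24735-charger.h>

static char *example_bq24735_supplied_to[] = { "main-battery" };

static struct bq24735_platform example_bq24735_pdata = {
	.charge_current = 1600,		/* placeholder, units per datasheet */
	.charge_voltage = 12600,	/* placeholder */
	.input_current = 3000,		/* placeholder */
	.name = "bq24735-ac",
	.status_gpio = 22,		/* placeholder AC-present GPIO */
	.status_gpio_active_low = 1,
	.status_gpio_valid = true,
	.supplied_to = example_bq24735_supplied_to,
	.num_supplicants = ARRAY_SIZE(example_bq24735_supplied_to),
};
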
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
new file mode 100644
index 000000000000..4e250417ee30
--- /dev/null
+++ b/include/linux/powercap.h
@@ -0,0 +1,325 @@
1/*
2 * powercap.h: Data types and headers for sysfs power capping interface
3 * Copyright (c) 2013, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.
16 *
17 */
18
19#ifndef __POWERCAP_H__
20#define __POWERCAP_H__
21
22#include <linux/device.h>
23#include <linux/idr.h>
24
25/*
26 * A power cap class device can contain multiple powercap control_types.
27 * Each control_type can have multiple power zones, which can be independently
28 * controlled. Each power zone can have one or more constraints.
29 */
30
31struct powercap_control_type;
32struct powercap_zone;
33struct powercap_zone_constraint;
34
35/**
36 * struct powercap_control_type_ops - Define control type callbacks
37 * @set_enable: Enable/Disable whole control type.
38 * Default is enabled. But this callback allows all zones
39 * to be in disable state and remove any applied power
 40 * limits. If disabled, power zones can only be monitored,
 41 * not controlled.
42 * @get_enable: get Enable/Disable status.
43 * @release: Callback to inform that last reference to this
44 * control type is closed. So it is safe to free data
45 * structure associated with this control type.
 46 * This callback is mandatory if the client owns the memory
47 * for the control type.
48 *
49 * This structure defines control type callbacks to be implemented by client
50 * drivers
51 */
52struct powercap_control_type_ops {
53 int (*set_enable) (struct powercap_control_type *, bool mode);
54 int (*get_enable) (struct powercap_control_type *, bool *mode);
55 int (*release) (struct powercap_control_type *);
56};
57
58/**
59 * struct powercap_control_type- Defines a powercap control_type
60 * @name: name of control_type
61 * @dev: device for this control_type
62 * @idr: idr to have unique id for its child
63 * @root_node: Root holding power zones for this control_type
64 * @ops: Pointer to callback struct
65 * @node_lock: mutex for control type
 66 * @allocated: It is possible that the client owns the memory
67 * used by this structure. In this case
68 * this flag is set to false by framework to
69 * prevent deallocation during release process.
70 * Otherwise this flag is set to true.
71 * @ctrl_inst: link to the control_type list
72 *
73 * Defines powercap control_type. This acts as a container for power
74 * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
75 * All fields are private and should not be used by client drivers.
76 */
77struct powercap_control_type {
78 struct device dev;
79 struct idr idr;
80 int nr_zones;
81 const struct powercap_control_type_ops *ops;
82 struct mutex lock;
83 bool allocated;
84 struct list_head node;
85};
86
87/**
88 * struct powercap_zone_ops - Define power zone callbacks
89 * @get_max_energy_range_uj: Get maximum range of energy counter in
90 * micro-joules.
91 * @get_energy_uj: Get current energy counter in micro-joules.
92 * @reset_energy_uj: Reset micro-joules energy counter.
93 * @get_max_power_range_uw: Get maximum range of power counter in
94 * micro-watts.
95 * @get_power_uw: Get current power counter in micro-watts.
96 * @set_enable: Enable/Disable power zone controls.
97 * Default is enabled.
98 * @get_enable: get Enable/Disable status.
99 * @release: Callback to inform that last reference to this
100 * control type is closed. So it is safe to free
101 * data structure associated with this
102 * control type. Mandatory, if client driver owns
103 * the power_zone memory.
104 *
105 * This structure defines zone callbacks to be implemented by client drivers.
 106 * Client drivers can define both energy and power related callbacks, but at
 107 * least one type (either power or energy) is mandatory. Client drivers
108 * should handle mutual exclusion, if required in callbacks.
109 */
110struct powercap_zone_ops {
111 int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *);
112 int (*get_energy_uj) (struct powercap_zone *, u64 *);
113 int (*reset_energy_uj) (struct powercap_zone *);
114 int (*get_max_power_range_uw) (struct powercap_zone *, u64 *);
115 int (*get_power_uw) (struct powercap_zone *, u64 *);
116 int (*set_enable) (struct powercap_zone *, bool mode);
117 int (*get_enable) (struct powercap_zone *, bool *mode);
118 int (*release) (struct powercap_zone *);
119};
120
121#define POWERCAP_ZONE_MAX_ATTRS 6
122#define POWERCAP_CONSTRAINTS_ATTRS 8
123#define MAX_CONSTRAINTS_PER_ZONE 10
124/**
125 * struct powercap_zone- Defines instance of a power cap zone
126 * @id: Unique id
127 * @name: Power zone name.
128 * @control_type_inst: Control type instance for this zone.
129 * @ops: Pointer to the zone operation structure.
130 * @dev: Instance of a device.
131 * @const_id_cnt: Number of constraint defined.
132 * @idr: Instance to an idr entry for children zones.
133 * @parent_idr: To remove reference from the parent idr.
134 * @private_data: Private data pointer if any for this zone.
135 * @zone_dev_attrs: Attributes associated with this device.
136 * @zone_attr_count: Attribute count.
137 * @dev_zone_attr_group: Attribute group for attributes.
138 * @dev_attr_groups: Attribute group store to register with device.
 139 * @allocated: It is possible that the client owns the memory
140 * used by this structure. In this case
141 * this flag is set to false by framework to
142 * prevent deallocation during release process.
143 * Otherwise this flag is set to true.
144 * @constraint_ptr: List of constraints for this zone.
145 *
146 * This defines a power zone instance. The fields of this structure are
147 * private, and should not be used by client drivers.
148 */
149struct powercap_zone {
150 int id;
151 char *name;
152 void *control_type_inst;
153 const struct powercap_zone_ops *ops;
154 struct device dev;
155 int const_id_cnt;
156 struct idr idr;
157 struct idr *parent_idr;
158 void *private_data;
159 struct attribute **zone_dev_attrs;
160 int zone_attr_count;
161 struct attribute_group dev_zone_attr_group;
162 const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */
163 bool allocated;
164 struct powercap_zone_constraint *constraints;
165};
166
167/**
168 * struct powercap_zone_constraint_ops - Define constraint callbacks
169 * @set_power_limit_uw: Set power limit in micro-watts.
170 * @get_power_limit_uw: Get power limit in micro-watts.
171 * @set_time_window_us: Set time window in micro-seconds.
172 * @get_time_window_us: Get time window in micro-seconds.
173 * @get_max_power_uw: Get max power allowed in micro-watts.
174 * @get_min_power_uw: Get min power allowed in micro-watts.
175 * @get_max_time_window_us: Get max time window allowed in micro-seconds.
176 * @get_min_time_window_us: Get min time window allowed in micro-seconds.
177 * @get_name: Get the name of constraint
178 *
179 * This structure is used to define the constraint callbacks for the client
180 * drivers. The following callbacks are mandatory and can't be NULL:
181 * set_power_limit_uw
182 * get_power_limit_uw
183 * set_time_window_us
184 * get_time_window_us
185 * get_name
186 * Client drivers should handle mutual exclusion, if required in callbacks.
187 */
188struct powercap_zone_constraint_ops {
189 int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
190 int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
191 int (*set_time_window_us) (struct powercap_zone *, int, u64);
192 int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
193 int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
194 int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
195 int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
196 int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
197 const char *(*get_name) (struct powercap_zone *, int);
198};
199
200/**
201 * struct powercap_zone_constraint- Defines instance of a constraint
202 * @id: Instance Id of this constraint.
203 * @power_zone: Pointer to the power zone for this constraint.
204 * @ops: Pointer to the constraint callbacks.
205 *
206 * This defines a constraint instance.
207 */
208struct powercap_zone_constraint {
209 int id;
210 struct powercap_zone *power_zone;
211 struct powercap_zone_constraint_ops *ops;
212};
213
214
215/* For clients to get their device pointer, may be used for dev_dbgs */
216#define POWERCAP_GET_DEV(power_zone) (&power_zone->dev)
217
218/**
219* powercap_set_zone_data() - Set private data for a zone
220* @power_zone: A pointer to the valid zone instance.
221* @pdata: A pointer to the user private data.
222*
 223* Allows client drivers to associate some private data with a zone instance.
224*/
225static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
226 void *pdata)
227{
228 if (power_zone)
229 power_zone->private_data = pdata;
230}
231
232/**
233* powercap_get_zone_data() - Get private data for a zone
234* @power_zone: A pointer to the valid zone instance.
235*
 236* Allows client drivers to get the private data associated with a zone,
 237* as set earlier by a call to powercap_set_zone_data.
238*/
239static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
240{
241 if (power_zone)
242 return power_zone->private_data;
243 return NULL;
244}
245
246/**
247* powercap_register_control_type() - Register a control_type with framework
248* @control_type: Pointer to client allocated memory for the control type
249* structure storage. If this is NULL, powercap framework
250* will allocate memory and own it.
251* Advantage of this parameter is that client can embed
252* this data in its data structures and allocate in a
253* single call, preventing multiple allocations.
254* @control_type_name: The Name of this control_type, which will be shown
255* in the sysfs Interface.
256* @ops: Callbacks for control type. This parameter is optional.
257*
258* Used to create a control_type with the power capping class. Here control_type
259* can represent a type of technology, which can control a range of power zones.
 260* For example, a control_type can be RAPL (Running Average Power Limit) on
 261* Intel® 64 and IA-32 Processor Architectures. The name can be any string,
 262* but it must be unique; otherwise this function returns NULL.
263* A pointer to the control_type instance is returned on success.
264*/
265struct powercap_control_type *powercap_register_control_type(
266 struct powercap_control_type *control_type,
267 const char *name,
268 const struct powercap_control_type_ops *ops);
269
270/**
271* powercap_unregister_control_type() - Unregister a control_type from framework
272* @instance: A pointer to the valid control_type instance.
273*
274* Used to unregister a control_type with the power capping class.
275* All power zones registered under this control type have to be unregistered
276* before calling this function, or it will fail with an error code.
277*/
278int powercap_unregister_control_type(struct powercap_control_type *instance);
279
280/* Zone register/unregister API */
281
282/**
283* powercap_register_zone() - Register a power zone
284* @power_zone: Pointer to client allocated memory for the power zone structure
285* storage. If this is NULL, powercap framework will allocate
286* memory and own it. Advantage of this parameter is that client
287* can embed this data in its data structures and allocate in a
288* single call, preventing multiple allocations.
289* @control_type: A control_type instance under which this zone operates.
290* @name: A name for this zone.
291* @parent: A pointer to the parent power zone instance if any or NULL
292* @ops: Pointer to zone operation callback structure.
293* @no_constraints: Number of constraints for this zone
294* @const_ops: Pointer to constraint callback structure
295*
296* Register a power zone under a given control type. A power zone must register
297* a pointer to a structure representing zone callbacks.
298* A power zone can be located under a parent power zone, in which case @parent
299* should point to it. Otherwise, if @parent is NULL, the new power zone will
300* be located directly under the given control type
301* For each power zone there may be a number of constraints that appear in the
302* sysfs under that zone as attributes with unique numeric IDs.
303* Returns pointer to the power_zone on success.
304*/
305struct powercap_zone *powercap_register_zone(
306 struct powercap_zone *power_zone,
307 struct powercap_control_type *control_type,
308 const char *name,
309 struct powercap_zone *parent,
310 const struct powercap_zone_ops *ops,
311 int nr_constraints,
312 struct powercap_zone_constraint_ops *const_ops);
313
314/**
315* powercap_unregister_zone() - Unregister a zone device
316* @control_type: A pointer to the valid instance of a control_type.
317* @power_zone: A pointer to the valid zone instance for a control_type
318*
319* Used to unregister a zone device for a control_type. Caller should
320* make sure that children for this zone are unregistered first.
321*/
322int powercap_unregister_zone(struct powercap_control_type *control_type,
323 struct powercap_zone *power_zone);
324
325#endif
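
To make the new powercap API concrete, here is a compressed sketch of a client registering one control type and one zone that exposes an energy counter plus a single power-limit constraint. Everything prefixed example_ is invented, the framework is left to allocate the control type and zone (NULL first arguments), and error handling is trimmed to keep the sketch short.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/powercap.h>

static u64 example_limit_uw;

static int example_get_energy_uj(struct powercap_zone *zone, u64 *energy)
{
	*energy = 0;			/* read the real energy counter here */
	return 0;
}

static int example_get_max_energy_range_uj(struct powercap_zone *zone, u64 *range)
{
	*range = 1ULL << 32;
	return 0;
}

static const struct powercap_zone_ops example_zone_ops = {
	.get_energy_uj = example_get_energy_uj,
	.get_max_energy_range_uj = example_get_max_energy_range_uj,
};

static int example_set_limit(struct powercap_zone *zone, int cid, u64 limit)
{
	example_limit_uw = limit;	/* program the hardware limit here */
	return 0;
}

static int example_get_limit(struct powercap_zone *zone, int cid, u64 *limit)
{
	*limit = example_limit_uw;
	return 0;
}

static int example_set_window(struct powercap_zone *zone, int cid, u64 window)
{
	return 0;
}

static int example_get_window(struct powercap_zone *zone, int cid, u64 *window)
{
	*window = 1000000;		/* 1 second, in microseconds */
	return 0;
}

static const char *example_get_name(struct powercap_zone *zone, int cid)
{
	return "long_term";
}

static struct powercap_zone_constraint_ops example_constraint_ops = {
	.set_power_limit_uw = example_set_limit,
	.get_power_limit_uw = example_get_limit,
	.set_time_window_us = example_set_window,
	.get_time_window_us = example_get_window,
	.get_name = example_get_name,
};

static struct powercap_control_type *example_control;
static struct powercap_zone *example_zone;

static int __init example_powercap_init(void)
{
	example_control = powercap_register_control_type(NULL, "example", NULL);
	if (IS_ERR_OR_NULL(example_control))
		return -ENODEV;

	example_zone = powercap_register_zone(NULL, example_control, "zone0",
					      NULL, &example_zone_ops, 1,
					      &example_constraint_ops);
	return IS_ERR_OR_NULL(example_zone) ? -ENODEV : 0;
}
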
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f5d4723cdb3d..a3d9dc8c2c00 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -6,106 +6,95 @@
6 * preempt_count (used for kernel preemption, interrupt count, etc.) 6 * preempt_count (used for kernel preemption, interrupt count, etc.)
7 */ 7 */
8 8
9#include <linux/thread_info.h>
10#include <linux/linkage.h> 9#include <linux/linkage.h>
11#include <linux/list.h> 10#include <linux/list.h>
12 11
13#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 12/*
14 extern void add_preempt_count(int val); 13 * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
15 extern void sub_preempt_count(int val); 14 * the other bits -- can't include that header due to inclusion hell.
16#else 15 */
17# define add_preempt_count(val) do { preempt_count() += (val); } while (0) 16#define PREEMPT_NEED_RESCHED 0x80000000
18# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
19#endif
20
21#define inc_preempt_count() add_preempt_count(1)
22#define dec_preempt_count() sub_preempt_count(1)
23
24#define preempt_count() (current_thread_info()->preempt_count)
25
26#ifdef CONFIG_PREEMPT
27
28asmlinkage void preempt_schedule(void);
29
30#define preempt_check_resched() \
31do { \
32 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
33 preempt_schedule(); \
34} while (0)
35
36#ifdef CONFIG_CONTEXT_TRACKING
37 17
38void preempt_schedule_context(void); 18#include <asm/preempt.h>
39 19
40#define preempt_check_resched_context() \ 20#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
41do { \ 21extern void preempt_count_add(int val);
42 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ 22extern void preempt_count_sub(int val);
43 preempt_schedule_context(); \ 23#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
44} while (0)
45#else 24#else
25#define preempt_count_add(val) __preempt_count_add(val)
26#define preempt_count_sub(val) __preempt_count_sub(val)
27#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
28#endif
46 29
47#define preempt_check_resched_context() preempt_check_resched() 30#define __preempt_count_inc() __preempt_count_add(1)
48 31#define __preempt_count_dec() __preempt_count_sub(1)
49#endif /* CONFIG_CONTEXT_TRACKING */
50
51#else /* !CONFIG_PREEMPT */
52
53#define preempt_check_resched() do { } while (0)
54#define preempt_check_resched_context() do { } while (0)
55
56#endif /* CONFIG_PREEMPT */
57 32
33#define preempt_count_inc() preempt_count_add(1)
34#define preempt_count_dec() preempt_count_sub(1)
58 35
59#ifdef CONFIG_PREEMPT_COUNT 36#ifdef CONFIG_PREEMPT_COUNT
60 37
61#define preempt_disable() \ 38#define preempt_disable() \
62do { \ 39do { \
63 inc_preempt_count(); \ 40 preempt_count_inc(); \
64 barrier(); \ 41 barrier(); \
65} while (0) 42} while (0)
66 43
67#define sched_preempt_enable_no_resched() \ 44#define sched_preempt_enable_no_resched() \
68do { \ 45do { \
69 barrier(); \ 46 barrier(); \
70 dec_preempt_count(); \ 47 preempt_count_dec(); \
71} while (0) 48} while (0)
72 49
73#define preempt_enable_no_resched() sched_preempt_enable_no_resched() 50#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
74 51
52#ifdef CONFIG_PREEMPT
75#define preempt_enable() \ 53#define preempt_enable() \
76do { \ 54do { \
77 preempt_enable_no_resched(); \
78 barrier(); \ 55 barrier(); \
79 preempt_check_resched(); \ 56 if (unlikely(preempt_count_dec_and_test())) \
57 __preempt_schedule(); \
58} while (0)
59
60#define preempt_check_resched() \
61do { \
62 if (should_resched()) \
63 __preempt_schedule(); \
80} while (0) 64} while (0)
81 65
82/* For debugging and tracer internals only! */ 66#else
83#define add_preempt_count_notrace(val) \ 67#define preempt_enable() preempt_enable_no_resched()
84 do { preempt_count() += (val); } while (0) 68#define preempt_check_resched() do { } while (0)
85#define sub_preempt_count_notrace(val) \ 69#endif
86 do { preempt_count() -= (val); } while (0)
87#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
88#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
89 70
90#define preempt_disable_notrace() \ 71#define preempt_disable_notrace() \
91do { \ 72do { \
92 inc_preempt_count_notrace(); \ 73 __preempt_count_inc(); \
93 barrier(); \ 74 barrier(); \
94} while (0) 75} while (0)
95 76
96#define preempt_enable_no_resched_notrace() \ 77#define preempt_enable_no_resched_notrace() \
97do { \ 78do { \
98 barrier(); \ 79 barrier(); \
99 dec_preempt_count_notrace(); \ 80 __preempt_count_dec(); \
100} while (0) 81} while (0)
101 82
102/* preempt_check_resched is OK to trace */ 83#ifdef CONFIG_PREEMPT
84
85#ifndef CONFIG_CONTEXT_TRACKING
86#define __preempt_schedule_context() __preempt_schedule()
87#endif
88
103#define preempt_enable_notrace() \ 89#define preempt_enable_notrace() \
104do { \ 90do { \
105 preempt_enable_no_resched_notrace(); \
106 barrier(); \ 91 barrier(); \
107 preempt_check_resched_context(); \ 92 if (unlikely(__preempt_count_dec_and_test())) \
93 __preempt_schedule_context(); \
108} while (0) 94} while (0)
95#else
96#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
97#endif
109 98
110#else /* !CONFIG_PREEMPT_COUNT */ 99#else /* !CONFIG_PREEMPT_COUNT */
111 100
@@ -115,10 +104,11 @@ do { \
115 * that can cause faults and scheduling migrate into our preempt-protected 104 * that can cause faults and scheduling migrate into our preempt-protected
116 * region. 105 * region.
117 */ 106 */
118#define preempt_disable() barrier() 107#define preempt_disable() barrier()
119#define sched_preempt_enable_no_resched() barrier() 108#define sched_preempt_enable_no_resched() barrier()
120#define preempt_enable_no_resched() barrier() 109#define preempt_enable_no_resched() barrier()
121#define preempt_enable() barrier() 110#define preempt_enable() barrier()
111#define preempt_check_resched() do { } while (0)
122 112
123#define preempt_disable_notrace() barrier() 113#define preempt_disable_notrace() barrier()
124#define preempt_enable_no_resched_notrace() barrier() 114#define preempt_enable_no_resched_notrace() barrier()
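
The net effect of this rework is that preempt_disable()/preempt_enable() now manipulate a per-architecture preempt count (with the need-resched state folded into the decrement where the architecture supports it), but the calling convention for ordinary kernel code is unchanged. A trivial per-CPU critical section for reference; the counter is hypothetical.

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_bump_counter(void)
{
	preempt_disable();		/* stay on this CPU */
	__this_cpu_inc(example_counter);
	preempt_enable();		/* may call __preempt_schedule() if needed */
}
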
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index 931bc616219f..d169820203dd 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -11,36 +11,23 @@
11 * - bits 0-7 are the preemption count (max preemption depth: 256) 11 * - bits 0-7 are the preemption count (max preemption depth: 256)
12 * - bits 8-15 are the softirq count (max # of softirqs: 256) 12 * - bits 8-15 are the softirq count (max # of softirqs: 256)
13 * 13 *
14 * The hardirq count can in theory reach the same as NR_IRQS. 14 * The hardirq count could in theory be the same as the number of
15 * In reality, the number of nested IRQS is limited to the stack 15 * interrupts in the system, but we run all interrupt handlers with
16 * size as well. For archs with over 1000 IRQS it is not practical 16 * interrupts disabled, so we cannot have nesting interrupts. Though
17 * to expect that they will all nest. We give a max of 10 bits for 17 * there are a few palaeontologic drivers which reenable interrupts in
18 * hardirq nesting. An arch may choose to give less than 10 bits. 18 * the handler, so we need more than one bit here.
19 * m68k expects it to be 8.
20 * 19 *
21 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) 20 * PREEMPT_MASK: 0x000000ff
22 * - bit 26 is the NMI_MASK 21 * SOFTIRQ_MASK: 0x0000ff00
23 * - bit 27 is the PREEMPT_ACTIVE flag 22 * HARDIRQ_MASK: 0x000f0000
24 * 23 * NMI_MASK: 0x00100000
25 * PREEMPT_MASK: 0x000000ff 24 * PREEMPT_ACTIVE: 0x00200000
26 * SOFTIRQ_MASK: 0x0000ff00
27 * HARDIRQ_MASK: 0x03ff0000
28 * NMI_MASK: 0x04000000
29 */ 25 */
30#define PREEMPT_BITS 8 26#define PREEMPT_BITS 8
31#define SOFTIRQ_BITS 8 27#define SOFTIRQ_BITS 8
28#define HARDIRQ_BITS 4
32#define NMI_BITS 1 29#define NMI_BITS 1
33 30
34#define MAX_HARDIRQ_BITS 10
35
36#ifndef HARDIRQ_BITS
37# define HARDIRQ_BITS MAX_HARDIRQ_BITS
38#endif
39
40#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
41#error HARDIRQ_BITS too high!
42#endif
43
44#define PREEMPT_SHIFT 0 31#define PREEMPT_SHIFT 0
45#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) 32#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
46#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) 33#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
@@ -60,15 +47,9 @@
60 47
61#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) 48#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
62 49
63#ifndef PREEMPT_ACTIVE
64#define PREEMPT_ACTIVE_BITS 1 50#define PREEMPT_ACTIVE_BITS 1
65#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) 51#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
66#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) 52#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
67#endif
68
69#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
70#error PREEMPT_ACTIVE is too low!
71#endif
72 53
73#define hardirq_count() (preempt_count() & HARDIRQ_MASK) 54#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
74#define softirq_count() (preempt_count() & SOFTIRQ_MASK) 55#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
diff --git a/include/linux/printk.h b/include/linux/printk.h
index e6131a782481..694925837a16 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -233,6 +233,8 @@ extern asmlinkage void dump_stack(void) __cold;
233 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) 233 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
234#endif 234#endif
235 235
236#include <linux/dynamic_debug.h>
237
236/* If you are writing a driver, please use dev_dbg instead */ 238/* If you are writing a driver, please use dev_dbg instead */
237#if defined(CONFIG_DYNAMIC_DEBUG) 239#if defined(CONFIG_DYNAMIC_DEBUG)
238/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ 240/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
@@ -343,7 +345,19 @@ extern asmlinkage void dump_stack(void) __cold;
343#endif 345#endif
344 346
345/* If you are writing a driver, please use dev_dbg instead */ 347/* If you are writing a driver, please use dev_dbg instead */
346#if defined(DEBUG) 348#if defined(CONFIG_DYNAMIC_DEBUG)
349/* descriptor check is first to prevent flooding with "callbacks suppressed" */
350#define pr_debug_ratelimited(fmt, ...) \
351do { \
352 static DEFINE_RATELIMIT_STATE(_rs, \
353 DEFAULT_RATELIMIT_INTERVAL, \
354 DEFAULT_RATELIMIT_BURST); \
355 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
356 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
357 __ratelimit(&_rs)) \
358 __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
359} while (0)
360#elif defined(DEBUG)
347#define pr_debug_ratelimited(fmt, ...) \ 361#define pr_debug_ratelimited(fmt, ...) \
348 printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) 362 printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
349#else 363#else
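
With this hunk, pr_debug_ratelimited() is gated by dynamic debug exactly like pr_debug(): the callsite stays compiled in but silent until enabled through the dynamic-debug control file, and the rate limit only applies once it is enabled. Typical driver usage is unchanged; the function and message below are illustrative.

#define pr_fmt(fmt) "example: " fmt

#include <linux/printk.h>
#include <linux/ratelimit.h>

static void example_hot_path(int err)
{
	/* Emitted only when enabled via dynamic debug, and then rate limited. */
	pr_debug_ratelimited("transient error %d, retrying\n", err);
}
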
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
51 char *buf; 51 char *buf;
52 size_t bufsize; 52 size_t bufsize;
53 struct mutex read_mutex; /* serialize open/read/close */ 53 struct mutex read_mutex; /* serialize open/read/close */
54 int flags;
54 int (*open)(struct pstore_info *psi); 55 int (*open)(struct pstore_info *psi);
55 int (*close)(struct pstore_info *psi); 56 int (*close)(struct pstore_info *psi);
56 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 57 ssize_t (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
70 void *data; 71 void *data;
71}; 72};
72 73
74#define PSTORE_FLAGS_FRAGILE 1
75
73#ifdef CONFIG_PSTORE 76#ifdef CONFIG_PSTORE
74extern int pstore_register(struct pstore_info *); 77extern int pstore_register(struct pstore_info *);
75extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); 78extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
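
A backend whose storage may not survive every class of crash can now say so through the new flags field. The fragment below only shows where the flag goes before pstore_register(); it is not a working backend, since the buffer and read/write callbacks a real backend must provide are not shown.

#include <linux/module.h>
#include <linux/pstore.h>

static struct pstore_info example_psinfo = {
	.owner = THIS_MODULE,
	.name = "example-pstore",
	.flags = PSTORE_FLAGS_FRAGILE,	/* contents may not survive all crash paths */
	/* .buf, .bufsize, .read, .write, ... omitted from this sketch */
};

static int __init example_pstore_init(void)
{
	return pstore_register(&example_psinfo);
}
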
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 56f4a866539a..2de2e275b2cb 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -6,6 +6,9 @@
6 6
7#include <linux/backlight.h> 7#include <linux/backlight.h>
8 8
9/* TODO: convert to gpiod_*() API once it has been merged */
10#define PWM_BACKLIGHT_GPIO_ACTIVE_LOW (1 << 0)
11
9struct platform_pwm_backlight_data { 12struct platform_pwm_backlight_data {
10 int pwm_id; 13 int pwm_id;
11 unsigned int max_brightness; 14 unsigned int max_brightness;
@@ -13,6 +16,8 @@ struct platform_pwm_backlight_data {
13 unsigned int lth_brightness; 16 unsigned int lth_brightness;
14 unsigned int pwm_period_ns; 17 unsigned int pwm_period_ns;
15 unsigned int *levels; 18 unsigned int *levels;
19 int enable_gpio;
20 unsigned long enable_gpio_flags;
16 int (*init)(struct device *dev); 21 int (*init)(struct device *dev);
17 int (*notify)(struct device *dev, int brightness); 22 int (*notify)(struct device *dev, int brightness);
18 void (*notify_after)(struct device *dev, int brightness); 23 void (*notify_after)(struct device *dev, int brightness);
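
Board files gain a direct way to hand the backlight enable GPIO to the driver instead of toggling it in init/notify callbacks. A hedged sketch with made-up numbers; dft_brightness is assumed to be the existing default-brightness field of this structure.

#include <linux/pwm_backlight.h>

static struct platform_pwm_backlight_data example_backlight_pdata = {
	.pwm_id = 0,
	.max_brightness = 255,
	.dft_brightness = 128,			/* assumed existing field */
	.pwm_period_ns = 1000000,		/* 1 kHz PWM */
	.enable_gpio = 42,			/* placeholder */
	.enable_gpio_flags = PWM_BACKLIGHT_GPIO_ACTIVE_LOW,
};
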
diff --git a/include/linux/random.h b/include/linux/random.h
index 6312dd9ba449..4002b3df4c85 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -29,8 +29,13 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
29u32 prandom_u32(void); 29u32 prandom_u32(void);
30void prandom_bytes(void *buf, int nbytes); 30void prandom_bytes(void *buf, int nbytes);
31void prandom_seed(u32 seed); 31void prandom_seed(u32 seed);
32void prandom_reseed_late(void);
32 33
33u32 prandom_u32_state(struct rnd_state *); 34struct rnd_state {
35 __u32 s1, s2, s3, s4;
36};
37
38u32 prandom_u32_state(struct rnd_state *state);
34void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); 39void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
35 40
36/* 41/*
@@ -50,9 +55,10 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
50{ 55{
51 u32 i = (seed >> 32) ^ (seed << 10) ^ seed; 56 u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
52 57
53 state->s1 = __seed(i, 1); 58 state->s1 = __seed(i, 2U);
54 state->s2 = __seed(i, 7); 59 state->s2 = __seed(i, 8U);
55 state->s3 = __seed(i, 15); 60 state->s3 = __seed(i, 16U);
61 state->s4 = __seed(i, 128U);
56} 62}
57 63
58#ifdef CONFIG_ARCH_RANDOM 64#ifdef CONFIG_ARCH_RANDOM
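
With struct rnd_state now public (and widened to four words), callers can keep a private, reproducible pseudo-random generator alongside the global prandom state. A small sketch:

#include <linux/random.h>

static u32 example_reproducible_u32(u64 seed)
{
	struct rnd_state state;

	prandom_seed_state(&state, seed);	/* same seed => same sequence */
	return prandom_u32_state(&state);
}
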
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index aa870a4ddf54..57e75ae9910f 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -85,6 +85,11 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
85 *rb_link = node; 85 *rb_link = node;
86} 86}
87 87
88#define rb_entry_safe(ptr, type, member) \
89 ({ typeof(ptr) ____ptr = (ptr); \
90 ____ptr ? rb_entry(____ptr, type, member) : NULL; \
91 })
92
88/** 93/**
89 * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of 94 * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
90 * given type safe against removal of rb_node entry 95 * given type safe against removal of rb_node entry
@@ -95,12 +100,9 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
95 * @field: the name of the rb_node field within 'type'. 100 * @field: the name of the rb_node field within 'type'.
96 */ 101 */
97#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ 102#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
98 for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\ 103 for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
99 n = rb_entry(rb_next_postorder(&pos->field), \ 104 pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
100 typeof(*pos), field); \ 105 typeof(*pos), field); 1; }); \
101 &pos->field; \ 106 pos = n)
102 pos = n, \
103 n = rb_entry(rb_next_postorder(&pos->field), \
104 typeof(*pos), field))
105 107
106#endif /* _LINUX_RBTREE_H */ 108#endif /* _LINUX_RBTREE_H */
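
The rewritten iterator fetches the next node before the loop body runs and tolerates an empty tree, so the body may free the entry it is visiting. Typical teardown use, assuming a hypothetical structure with an embedded rb_node:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct example_item {
	struct rb_node node;
	int key;
};

static void example_destroy_tree(struct rb_root *root)
{
	struct example_item *item, *next;

	rbtree_postorder_for_each_entry_safe(item, next, root, node)
		kfree(item);		/* safe: 'next' was read before this */

	*root = RB_ROOT;
}
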
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4106721c4e5e..45a0a9e81478 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -19,6 +19,21 @@
19 */ 19 */
20 20
21/* 21/*
22 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
23 * @list: list to be initialized
24 *
25 * You should instead use INIT_LIST_HEAD() for normal initialization and
26 * cleanup tasks, when readers have no access to the list being initialized.
27 * However, if the list being initialized is visible to readers, you
28 * need to keep the compiler from being too mischievous.
29 */
30static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
31{
32 ACCESS_ONCE(list->next) = list;
33 ACCESS_ONCE(list->prev) = list;
34}
35
36/*
22 * return the ->next pointer of a list_head in an rcu safe 37 * return the ->next pointer of a list_head in an rcu safe
23 * way, we must not access it directly 38 * way, we must not access it directly
24 */ 39 */
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list,
191 if (list_empty(list)) 206 if (list_empty(list))
192 return; 207 return;
193 208
194 /* "first" and "last" tracking list, so initialize it. */ 209 /*
210 * "first" and "last" tracking list, so initialize it. RCU readers
211 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
212 * instead of INIT_LIST_HEAD().
213 */
195 214
196 INIT_LIST_HEAD(list); 215 INIT_LIST_HEAD_RCU(list);
197 216
198 /* 217 /*
199 * At this point, the list body still points to the source list. 218 * At this point, the list body still points to the source list.
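A brief sketch of the situation INIT_LIST_HEAD_RCU() is for, mirroring the list_splice_init_rcu() change above: re-initialising a list head that concurrent RCU readers may still be walking. demo_list, demo_mutex and demo_detach_all are hypothetical:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_mutex);

/* Move every entry to 'out' while readers may still be traversing
 * demo_list under rcu_read_lock(); the source head is reset with
 * INIT_LIST_HEAD_RCU(), so readers never see a torn initialisation. */
static void demo_detach_all(struct list_head *out)
{
	mutex_lock(&demo_mutex);
	list_splice_init_rcu(&demo_list, out, synchronize_rcu);
	mutex_unlock(&demo_mutex);
}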
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc39346b..39cbb889e20d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
261 rcu_irq_exit(); \ 261 rcu_irq_exit(); \
262 } while (0) 262 } while (0)
263 263
264#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
265extern bool __rcu_is_watching(void);
266#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
267
264/* 268/*
265 * Infrastructure to implement the synchronize_() primitives in 269 * Infrastructure to implement the synchronize_() primitives in
266 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 270 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
297} 301}
298#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 302#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
299 303
300#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
301extern int rcu_is_cpu_idle(void);
302#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
303
304#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) 304#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
305bool rcu_lockdep_current_cpu_online(void); 305bool rcu_lockdep_current_cpu_online(void);
306#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ 306#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
351{ 351{
352 if (!debug_lockdep_rcu_enabled()) 352 if (!debug_lockdep_rcu_enabled())
353 return 1; 353 return 1;
354 if (rcu_is_cpu_idle()) 354 if (!rcu_is_watching())
355 return 0; 355 return 0;
356 if (!rcu_lockdep_current_cpu_online()) 356 if (!rcu_lockdep_current_cpu_online())
357 return 0; 357 return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
402 402
403 if (!debug_lockdep_rcu_enabled()) 403 if (!debug_lockdep_rcu_enabled())
404 return 1; 404 return 1;
405 if (rcu_is_cpu_idle()) 405 if (!rcu_is_watching())
406 return 0; 406 return 0;
407 if (!rcu_lockdep_current_cpu_online()) 407 if (!rcu_lockdep_current_cpu_online())
408 return 0; 408 return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
771 __rcu_read_lock(); 771 __rcu_read_lock();
772 __acquire(RCU); 772 __acquire(RCU);
773 rcu_lock_acquire(&rcu_lock_map); 773 rcu_lock_acquire(&rcu_lock_map);
774 rcu_lockdep_assert(!rcu_is_cpu_idle(), 774 rcu_lockdep_assert(rcu_is_watching(),
775 "rcu_read_lock() used illegally while idle"); 775 "rcu_read_lock() used illegally while idle");
776} 776}
777 777
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
792 */ 792 */
793static inline void rcu_read_unlock(void) 793static inline void rcu_read_unlock(void)
794{ 794{
795 rcu_lockdep_assert(!rcu_is_cpu_idle(), 795 rcu_lockdep_assert(rcu_is_watching(),
796 "rcu_read_unlock() used illegally while idle"); 796 "rcu_read_unlock() used illegally while idle");
797 rcu_lock_release(&rcu_lock_map); 797 rcu_lock_release(&rcu_lock_map);
798 __release(RCU); 798 __release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
821 local_bh_disable(); 821 local_bh_disable();
822 __acquire(RCU_BH); 822 __acquire(RCU_BH);
823 rcu_lock_acquire(&rcu_bh_lock_map); 823 rcu_lock_acquire(&rcu_bh_lock_map);
824 rcu_lockdep_assert(!rcu_is_cpu_idle(), 824 rcu_lockdep_assert(rcu_is_watching(),
825 "rcu_read_lock_bh() used illegally while idle"); 825 "rcu_read_lock_bh() used illegally while idle");
826} 826}
827 827
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
832 */ 832 */
833static inline void rcu_read_unlock_bh(void) 833static inline void rcu_read_unlock_bh(void)
834{ 834{
835 rcu_lockdep_assert(!rcu_is_cpu_idle(), 835 rcu_lockdep_assert(rcu_is_watching(),
836 "rcu_read_unlock_bh() used illegally while idle"); 836 "rcu_read_unlock_bh() used illegally while idle");
837 rcu_lock_release(&rcu_bh_lock_map); 837 rcu_lock_release(&rcu_bh_lock_map);
838 __release(RCU_BH); 838 __release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
857 preempt_disable(); 857 preempt_disable();
858 __acquire(RCU_SCHED); 858 __acquire(RCU_SCHED);
859 rcu_lock_acquire(&rcu_sched_lock_map); 859 rcu_lock_acquire(&rcu_sched_lock_map);
860 rcu_lockdep_assert(!rcu_is_cpu_idle(), 860 rcu_lockdep_assert(rcu_is_watching(),
861 "rcu_read_lock_sched() used illegally while idle"); 861 "rcu_read_lock_sched() used illegally while idle");
862} 862}
863 863
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
875 */ 875 */
876static inline void rcu_read_unlock_sched(void) 876static inline void rcu_read_unlock_sched(void)
877{ 877{
878 rcu_lockdep_assert(!rcu_is_cpu_idle(), 878 rcu_lockdep_assert(rcu_is_watching(),
879 "rcu_read_unlock_sched() used illegally while idle"); 879 "rcu_read_unlock_sched() used illegally while idle");
880 rcu_lock_release(&rcu_sched_lock_map); 880 rcu_lock_release(&rcu_sched_lock_map);
881 __release(RCU_SCHED); 881 __release(RCU_SCHED);
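The hunks above replace the rcu_is_cpu_idle() checks with the inverted rcu_is_watching() test; a small sketch of the resulting debug idiom (demo_read_side is a hypothetical caller):

#include <linux/kernel.h>
#include <linux/rcupdate.h>

static void demo_read_side(void)
{
	/* The question is now phrased positively: is RCU watching this
	 * CPU, i.e. is the CPU not idle from RCU's point of view? */
	WARN_ON_ONCE(!rcu_is_watching());

	rcu_read_lock();
	/* ... dereference RCU-protected data here ... */
	rcu_read_unlock();
}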
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339e..09ebcbe9fd78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void)
132} 132}
133#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 133#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
134 134
135#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
136
137static inline bool rcu_is_watching(void)
138{
139 return __rcu_is_watching();
140}
141
142#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
143
144static inline bool rcu_is_watching(void)
145{
146 return true;
147}
148
149
150#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
151
135#endif /* __LINUX_RCUTINY_H */ 152#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d1bd2b..4b9c81548742 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void);
90extern void rcu_scheduler_starting(void); 90extern void rcu_scheduler_starting(void);
91extern int rcu_scheduler_active __read_mostly; 91extern int rcu_scheduler_active __read_mostly;
92 92
93extern bool rcu_is_watching(void);
94
93#endif /* __LINUX_RCUTREE_H */ 95#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
43 * Architecture-specific implementations of sys_reboot commands. 43 * Architecture-specific implementations of sys_reboot commands.
44 */ 44 */
45 45
46extern void migrate_to_reboot_cpu(void);
46extern void machine_restart(char *cmd); 47extern void machine_restart(char *cmd);
47extern void machine_halt(void); 48extern void machine_halt(void);
48extern void machine_power_off(void); 49extern void machine_power_off(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a10380bfbeac..e55907804d39 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -23,6 +23,7 @@ struct device;
23struct i2c_client; 23struct i2c_client;
24struct irq_domain; 24struct irq_domain;
25struct spi_device; 25struct spi_device;
26struct spmi_device;
26struct regmap; 27struct regmap;
27struct regmap_range_cfg; 28struct regmap_range_cfg;
28struct regmap_field; 29struct regmap_field;
@@ -70,6 +71,8 @@ struct regmap_range {
70 unsigned int range_max; 71 unsigned int range_max;
71}; 72};
72 73
74#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
75
73/* 76/*
74 * A table of ranges including some yes ranges and some no ranges. 77 * A table of ranges including some yes ranges and some no ranges.
75 * If a register belongs to a no_range, the corresponding check function 78 * If a register belongs to a no_range, the corresponding check function
@@ -318,6 +321,8 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
318 const struct regmap_config *config); 321 const struct regmap_config *config);
319struct regmap *regmap_init_spi(struct spi_device *dev, 322struct regmap *regmap_init_spi(struct spi_device *dev,
320 const struct regmap_config *config); 323 const struct regmap_config *config);
324struct regmap *regmap_init_spmi(struct spmi_device *dev,
325 const struct regmap_config *config);
321struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, 326struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
322 void __iomem *regs, 327 void __iomem *regs,
323 const struct regmap_config *config); 328 const struct regmap_config *config);
@@ -330,6 +335,8 @@ struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
330 const struct regmap_config *config); 335 const struct regmap_config *config);
331struct regmap *devm_regmap_init_spi(struct spi_device *dev, 336struct regmap *devm_regmap_init_spi(struct spi_device *dev,
332 const struct regmap_config *config); 337 const struct regmap_config *config);
338struct regmap *devm_regmap_init_spmi(struct spmi_device *dev,
339 const struct regmap_config *config);
333struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, 340struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
334 void __iomem *regs, 341 void __iomem *regs,
335 const struct regmap_config *config); 342 const struct regmap_config *config);
@@ -374,10 +381,13 @@ int regmap_reinit_cache(struct regmap *map,
374 const struct regmap_config *config); 381 const struct regmap_config *config);
375struct regmap *dev_get_regmap(struct device *dev, const char *name); 382struct regmap *dev_get_regmap(struct device *dev, const char *name);
376int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); 383int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
384int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
377int regmap_raw_write(struct regmap *map, unsigned int reg, 385int regmap_raw_write(struct regmap *map, unsigned int reg,
378 const void *val, size_t val_len); 386 const void *val, size_t val_len);
379int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 387int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
380 size_t val_count); 388 size_t val_count);
389int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
390 int num_regs);
381int regmap_raw_write_async(struct regmap *map, unsigned int reg, 391int regmap_raw_write_async(struct regmap *map, unsigned int reg,
382 const void *val, size_t val_len); 392 const void *val, size_t val_len);
383int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); 393int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
@@ -387,9 +397,14 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
387 size_t val_count); 397 size_t val_count);
388int regmap_update_bits(struct regmap *map, unsigned int reg, 398int regmap_update_bits(struct regmap *map, unsigned int reg,
389 unsigned int mask, unsigned int val); 399 unsigned int mask, unsigned int val);
400int regmap_update_bits_async(struct regmap *map, unsigned int reg,
401 unsigned int mask, unsigned int val);
390int regmap_update_bits_check(struct regmap *map, unsigned int reg, 402int regmap_update_bits_check(struct regmap *map, unsigned int reg,
391 unsigned int mask, unsigned int val, 403 unsigned int mask, unsigned int val,
392 bool *change); 404 bool *change);
405int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
406 unsigned int mask, unsigned int val,
407 bool *change);
393int regmap_get_val_bytes(struct regmap *map); 408int regmap_get_val_bytes(struct regmap *map);
394int regmap_async_complete(struct regmap *map); 409int regmap_async_complete(struct regmap *map);
395bool regmap_can_raw_write(struct regmap *map); 410bool regmap_can_raw_write(struct regmap *map);
@@ -425,11 +440,15 @@ bool regmap_reg_in_ranges(unsigned int reg,
425 * @reg: Offset of the register within the regmap bank 440 * @reg: Offset of the register within the regmap bank
426 * @lsb: lsb of the register field. 441 * @lsb: lsb of the register field.
427 * @reg: msb of the register field. 442 * @reg: msb of the register field.
443 * @id_size: port size if it has some ports
444 * @id_offset: address offset for each ports
428 */ 445 */
429struct reg_field { 446struct reg_field {
430 unsigned int reg; 447 unsigned int reg;
431 unsigned int lsb; 448 unsigned int lsb;
432 unsigned int msb; 449 unsigned int msb;
450 unsigned int id_size;
451 unsigned int id_offset;
433}; 452};
434 453
435#define REG_FIELD(_reg, _lsb, _msb) { \ 454#define REG_FIELD(_reg, _lsb, _msb) { \
@@ -448,6 +467,15 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
448 467
449int regmap_field_read(struct regmap_field *field, unsigned int *val); 468int regmap_field_read(struct regmap_field *field, unsigned int *val);
450int regmap_field_write(struct regmap_field *field, unsigned int val); 469int regmap_field_write(struct regmap_field *field, unsigned int val);
470int regmap_field_update_bits(struct regmap_field *field,
471 unsigned int mask, unsigned int val);
472
473int regmap_fields_write(struct regmap_field *field, unsigned int id,
474 unsigned int val);
475int regmap_fields_read(struct regmap_field *field, unsigned int id,
476 unsigned int *val);
477int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
478 unsigned int mask, unsigned int val);
451 479
452/** 480/**
453 * Description of an IRQ for the generic regmap irq_chip. 481 * Description of an IRQ for the generic regmap irq_chip.
@@ -527,6 +555,13 @@ static inline int regmap_write(struct regmap *map, unsigned int reg,
527 return -EINVAL; 555 return -EINVAL;
528} 556}
529 557
558static inline int regmap_write_async(struct regmap *map, unsigned int reg,
559 unsigned int val)
560{
561 WARN_ONCE(1, "regmap API is disabled");
562 return -EINVAL;
563}
564
530static inline int regmap_raw_write(struct regmap *map, unsigned int reg, 565static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
531 const void *val, size_t val_len) 566 const void *val, size_t val_len)
532{ 567{
@@ -576,6 +611,14 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
576 return -EINVAL; 611 return -EINVAL;
577} 612}
578 613
614static inline int regmap_update_bits_async(struct regmap *map,
615 unsigned int reg,
616 unsigned int mask, unsigned int val)
617{
618 WARN_ONCE(1, "regmap API is disabled");
619 return -EINVAL;
620}
621
579static inline int regmap_update_bits_check(struct regmap *map, 622static inline int regmap_update_bits_check(struct regmap *map,
580 unsigned int reg, 623 unsigned int reg,
581 unsigned int mask, unsigned int val, 624 unsigned int mask, unsigned int val,
@@ -585,6 +628,16 @@ static inline int regmap_update_bits_check(struct regmap *map,
585 return -EINVAL; 628 return -EINVAL;
586} 629}
587 630
631static inline int regmap_update_bits_check_async(struct regmap *map,
632 unsigned int reg,
633 unsigned int mask,
634 unsigned int val,
635 bool *change)
636{
637 WARN_ONCE(1, "regmap API is disabled");
638 return -EINVAL;
639}
640
588static inline int regmap_get_val_bytes(struct regmap *map) 641static inline int regmap_get_val_bytes(struct regmap *map)
589{ 642{
590 WARN_ONCE(1, "regmap API is disabled"); 643 WARN_ONCE(1, "regmap API is disabled");
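A hedged sketch of combining the new asynchronous write helpers with regmap_async_complete(); the register offsets, masks and the demo_configure name are invented for illustration:

#include <linux/regmap.h>

/* Queue a few non-blocking updates, then wait for the underlying bus
 * to drain them; the *_async calls return before the I/O completes. */
static int demo_configure(struct regmap *map)
{
	int ret;

	ret = regmap_write_async(map, 0x10, 0x01);
	if (ret)
		return ret;

	ret = regmap_update_bits_async(map, 0x14, 0x0f, 0x03);
	if (ret)
		return ret;

	return regmap_async_complete(map);
}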
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 27be915caa96..e530681bea70 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -146,6 +146,32 @@ struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
146void regulator_put(struct regulator *regulator); 146void regulator_put(struct regulator *regulator);
147void devm_regulator_put(struct regulator *regulator); 147void devm_regulator_put(struct regulator *regulator);
148 148
149int regulator_register_supply_alias(struct device *dev, const char *id,
150 struct device *alias_dev,
151 const char *alias_id);
152void regulator_unregister_supply_alias(struct device *dev, const char *id);
153
154int regulator_bulk_register_supply_alias(struct device *dev, const char **id,
155 struct device *alias_dev,
156 const char **alias_id, int num_id);
157void regulator_bulk_unregister_supply_alias(struct device *dev,
158 const char **id, int num_id);
159
160int devm_regulator_register_supply_alias(struct device *dev, const char *id,
161 struct device *alias_dev,
162 const char *alias_id);
163void devm_regulator_unregister_supply_alias(struct device *dev,
164 const char *id);
165
166int devm_regulator_bulk_register_supply_alias(struct device *dev,
167 const char **id,
168 struct device *alias_dev,
169 const char **alias_id,
170 int num_id);
171void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
172 const char **id,
173 int num_id);
174
149/* regulator output control and status */ 175/* regulator output control and status */
150int __must_check regulator_enable(struct regulator *regulator); 176int __must_check regulator_enable(struct regulator *regulator);
151int regulator_disable(struct regulator *regulator); 177int regulator_disable(struct regulator *regulator);
@@ -250,6 +276,59 @@ static inline void devm_regulator_put(struct regulator *regulator)
250{ 276{
251} 277}
252 278
279static inline int regulator_register_supply_alias(struct device *dev,
280 const char *id,
281 struct device *alias_dev,
282 const char *alias_id)
283{
284 return 0;
285}
286
287static inline void regulator_unregister_supply_alias(struct device *dev,
288 const char *id)
289{
290}
291
292static inline int regulator_bulk_register_supply_alias(struct device *dev,
293 const char **id,
294 struct device *alias_dev,
295 const char **alias_id,
296 int num_id)
297{
298 return 0;
299}
300
301static inline void regulator_bulk_unregister_supply_alias(struct device *dev,
302 const char **id,
303 int num_id)
304{
305}
306
307static inline int devm_regulator_register_supply_alias(struct device *dev,
308 const char *id,
309 struct device *alias_dev,
310 const char *alias_id)
311{
312 return 0;
313}
314
315static inline void devm_regulator_unregister_supply_alias(struct device *dev,
316 const char *id)
317{
318}
319
320static inline int devm_regulator_bulk_register_supply_alias(
321 struct device *dev, const char **id, struct device *alias_dev,
322 const char **alias_id, int num_id)
323{
324 return 0;
325}
326
327static inline void devm_regulator_bulk_unregister_supply_alias(
328 struct device *dev, const char **id, int num_id)
329{
330}
331
253static inline int regulator_enable(struct regulator *regulator) 332static inline int regulator_enable(struct regulator *regulator)
254{ 333{
255 return 0; 334 return 0;
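A minimal sketch of the new supply-alias helpers, which let one device reuse the supply mapping wired up for another; the device pointers and the "vdd"/"vcc" names are hypothetical:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Requests for "vdd" on 'child' will resolve to the "vcc" supply that
 * the platform described for 'parent'; the devm_ variant removes the
 * alias automatically when 'child' is unbound. */
static int demo_alias_supply(struct device *child, struct device *parent)
{
	return devm_regulator_register_supply_alias(child, "vdd",
						    parent, "vcc");
}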
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 9bdad43ad228..9370e65348a4 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -46,19 +46,26 @@ enum regulator_status {
46 * regulator_list_linear_range(). 46 * regulator_list_linear_range().
47 * 47 *
48 * @min_uV: Lowest voltage in range 48 * @min_uV: Lowest voltage in range
49 * @max_uV: Highest voltage in range
50 * @min_sel: Lowest selector for range 49 * @min_sel: Lowest selector for range
51 * @max_sel: Highest selector for range 50 * @max_sel: Highest selector for range
52 * @uV_step: Step size 51 * @uV_step: Step size
53 */ 52 */
54struct regulator_linear_range { 53struct regulator_linear_range {
55 unsigned int min_uV; 54 unsigned int min_uV;
56 unsigned int max_uV;
57 unsigned int min_sel; 55 unsigned int min_sel;
58 unsigned int max_sel; 56 unsigned int max_sel;
59 unsigned int uV_step; 57 unsigned int uV_step;
60}; 58};
61 59
60/* Initialize struct regulator_linear_range */
61#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
62{ \
63 .min_uV = _min_uV, \
64 .min_sel = _min_sel, \
65 .max_sel = _max_sel, \
66 .uV_step = _step_uV, \
67}
68
62/** 69/**
63 * struct regulator_ops - regulator operations. 70 * struct regulator_ops - regulator operations.
64 * 71 *
@@ -209,6 +216,7 @@ enum regulator_type {
209 * @min_uV: Voltage given by the lowest selector (if linear mapping) 216 * @min_uV: Voltage given by the lowest selector (if linear mapping)
210 * @uV_step: Voltage increase with each selector (if linear mapping) 217 * @uV_step: Voltage increase with each selector (if linear mapping)
211 * @linear_min_sel: Minimal selector for starting linear mapping 218 * @linear_min_sel: Minimal selector for starting linear mapping
219 * @fixed_uV: Fixed voltage of rails.
212 * @ramp_delay: Time to settle down after voltage change (unit: uV/us) 220 * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
213 * @volt_table: Voltage mapping table (if table based mapping) 221 * @volt_table: Voltage mapping table (if table based mapping)
214 * 222 *
@@ -241,6 +249,7 @@ struct regulator_desc {
241 unsigned int min_uV; 249 unsigned int min_uV;
242 unsigned int uV_step; 250 unsigned int uV_step;
243 unsigned int linear_min_sel; 251 unsigned int linear_min_sel;
252 int fixed_uV;
244 unsigned int ramp_delay; 253 unsigned int ramp_delay;
245 254
246 const struct regulator_linear_range *linear_ranges; 255 const struct regulator_linear_range *linear_ranges;
@@ -336,7 +345,12 @@ struct regulator_dev {
336struct regulator_dev * 345struct regulator_dev *
337regulator_register(const struct regulator_desc *regulator_desc, 346regulator_register(const struct regulator_desc *regulator_desc,
338 const struct regulator_config *config); 347 const struct regulator_config *config);
348struct regulator_dev *
349devm_regulator_register(struct device *dev,
350 const struct regulator_desc *regulator_desc,
351 const struct regulator_config *config);
339void regulator_unregister(struct regulator_dev *rdev); 352void regulator_unregister(struct regulator_dev *rdev);
353void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
340 354
341int regulator_notifier_call_chain(struct regulator_dev *rdev, 355int regulator_notifier_call_chain(struct regulator_dev *rdev,
342 unsigned long event, void *data); 356 unsigned long event, void *data);
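A sketch of a voltage table built with the new REGULATOR_LINEAR_RANGE() initializer, which no longer carries a max_uV field; the voltages and selector ranges are made up:

#include <linux/regulator/driver.h>

/* Selectors 0..15 cover 800 mV to 1550 mV in 50 mV steps, selectors
 * 16..31 cover 1600 mV to 3100 mV in 100 mV steps. */
static const struct regulator_linear_range demo_ranges[] = {
	REGULATOR_LINEAR_RANGE(800000,   0, 15,  50000),
	REGULATOR_LINEAR_RANGE(1600000, 16, 31, 100000),
};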
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 999b20ce06cf..730e638c5589 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -95,6 +95,7 @@ struct regulator_state {
95 * @initial_state: Suspend state to set by default. 95 * @initial_state: Suspend state to set by default.
96 * @initial_mode: Mode to set at startup. 96 * @initial_mode: Mode to set at startup.
97 * @ramp_delay: Time to settle down after voltage change (unit: uV/us) 97 * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
98 * @enable_time: Turn-on time of the rails (unit: microseconds)
98 */ 99 */
99struct regulation_constraints { 100struct regulation_constraints {
100 101
@@ -129,6 +130,7 @@ struct regulation_constraints {
129 unsigned int initial_mode; 130 unsigned int initial_mode;
130 131
131 unsigned int ramp_delay; 132 unsigned int ramp_delay;
133 unsigned int enable_time;
132 134
133 /* constraint flags */ 135 /* constraint flags */
134 unsigned always_on:1; /* regulator never off when system is on */ 136 unsigned always_on:1; /* regulator never off when system is on */
@@ -193,15 +195,10 @@ int regulator_suspend_finish(void);
193 195
194#ifdef CONFIG_REGULATOR 196#ifdef CONFIG_REGULATOR
195void regulator_has_full_constraints(void); 197void regulator_has_full_constraints(void);
196void regulator_use_dummy_regulator(void);
197#else 198#else
198static inline void regulator_has_full_constraints(void) 199static inline void regulator_has_full_constraints(void)
199{ 200{
200} 201}
201
202static inline void regulator_use_dummy_regulator(void)
203{
204}
205#endif 202#endif
206 203
207#endif 204#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f28544b2f9af..8e3e66ac0a52 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -15,7 +15,7 @@ extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
15extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, 15extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
16 u32 id, long expires, u32 error); 16 u32 id, long expires, u32 error);
17 17
18extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); 18void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
19 19
20/* RTNL is used as a global lock for all changes to network configuration */ 20/* RTNL is used as a global lock for all changes to network configuration */
21extern void rtnl_lock(void); 21extern void rtnl_lock(void);
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
24extern int rtnl_is_locked(void); 24extern int rtnl_is_locked(void);
25#ifdef CONFIG_PROVE_LOCKING 25#ifdef CONFIG_PROVE_LOCKING
26extern int lockdep_rtnl_is_held(void); 26extern int lockdep_rtnl_is_held(void);
27#else
28static inline int lockdep_rtnl_is_held(void)
29{
30 return 1;
31}
27#endif /* #ifdef CONFIG_PROVE_LOCKING */ 32#endif /* #ifdef CONFIG_PROVE_LOCKING */
28 33
29/** 34/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e27baeeda3f4..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -22,6 +22,7 @@ struct sched_param {
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/nodemask.h> 23#include <linux/nodemask.h>
24#include <linux/mm_types.h> 24#include <linux/mm_types.h>
25#include <linux/preempt_mask.h>
25 26
26#include <asm/page.h> 27#include <asm/page.h>
27#include <asm/ptrace.h> 28#include <asm/ptrace.h>
@@ -285,6 +286,14 @@ static inline void lockup_detector_init(void)
285} 286}
286#endif 287#endif
287 288
289#ifdef CONFIG_DETECT_HUNG_TASK
290void reset_hung_task_detector(void);
291#else
292static inline void reset_hung_task_detector(void)
293{
294}
295#endif
296
288/* Attach to any functions which should be ignored in wchan output. */ 297/* Attach to any functions which should be ignored in wchan output. */
289#define __sched __attribute__((__section__(".sched.text"))) 298#define __sched __attribute__((__section__(".sched.text")))
290 299
@@ -322,6 +331,10 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
322extern void set_dumpable(struct mm_struct *mm, int value); 331extern void set_dumpable(struct mm_struct *mm, int value);
323extern int get_dumpable(struct mm_struct *mm); 332extern int get_dumpable(struct mm_struct *mm);
324 333
334#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
335#define SUID_DUMP_USER 1 /* Dump as user of process */
336#define SUID_DUMP_ROOT 2 /* Dump as root */
337
325/* mm flags */ 338/* mm flags */
326/* dumpable bits */ 339/* dumpable bits */
327#define MMF_DUMPABLE 0 /* core dump is permitted */ 340#define MMF_DUMPABLE 0 /* core dump is permitted */
@@ -427,6 +440,12 @@ struct task_cputime {
427 .sum_exec_runtime = 0, \ 440 .sum_exec_runtime = 0, \
428 } 441 }
429 442
443#ifdef CONFIG_PREEMPT_COUNT
444#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
445#else
446#define PREEMPT_DISABLED PREEMPT_ENABLED
447#endif
448
430/* 449/*
431 * Disable preemption until the scheduler is running. 450 * Disable preemption until the scheduler is running.
432 * Reset by start_kernel()->sched_init()->init_idle(). 451 * Reset by start_kernel()->sched_init()->init_idle().
@@ -434,7 +453,7 @@ struct task_cputime {
434 * We include PREEMPT_ACTIVE to avoid cond_resched() from working 453 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
435 * before the scheduler is active -- see should_resched(). 454 * before the scheduler is active -- see should_resched().
436 */ 455 */
437#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE) 456#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
438 457
439/** 458/**
440 * struct thread_group_cputimer - thread group interval timer counts 459 * struct thread_group_cputimer - thread group interval timer counts
@@ -768,6 +787,7 @@ enum cpu_idle_type {
768#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ 787#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
769#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ 788#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
770#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ 789#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
790#define SD_NUMA 0x4000 /* cross-node balancing */
771 791
772extern int __weak arch_sd_sibiling_asym_packing(void); 792extern int __weak arch_sd_sibiling_asym_packing(void);
773 793
@@ -809,7 +829,9 @@ struct sched_domain {
809 unsigned int balance_interval; /* initialise to 1. units in ms. */ 829 unsigned int balance_interval; /* initialise to 1. units in ms. */
810 unsigned int nr_balance_failed; /* initialise to 0 */ 830 unsigned int nr_balance_failed; /* initialise to 0 */
811 831
812 u64 last_update; 832 /* idle_balance() stats */
833 u64 max_newidle_lb_cost;
834 unsigned long next_decay_max_lb_cost;
813 835
814#ifdef CONFIG_SCHEDSTATS 836#ifdef CONFIG_SCHEDSTATS
815 /* load_balance() stats */ 837 /* load_balance() stats */
@@ -908,7 +930,8 @@ struct pipe_inode_info;
908struct uts_namespace; 930struct uts_namespace;
909 931
910struct load_weight { 932struct load_weight {
911 unsigned long weight, inv_weight; 933 unsigned long weight;
934 u32 inv_weight;
912}; 935};
913 936
914struct sched_avg { 937struct sched_avg {
@@ -1029,6 +1052,8 @@ struct task_struct {
1029 struct task_struct *last_wakee; 1052 struct task_struct *last_wakee;
1030 unsigned long wakee_flips; 1053 unsigned long wakee_flips;
1031 unsigned long wakee_flip_decay_ts; 1054 unsigned long wakee_flip_decay_ts;
1055
1056 int wake_cpu;
1032#endif 1057#endif
1033 int on_rq; 1058 int on_rq;
1034 1059
@@ -1046,15 +1071,6 @@ struct task_struct {
1046 struct hlist_head preempt_notifiers; 1071 struct hlist_head preempt_notifiers;
1047#endif 1072#endif
1048 1073
1049 /*
1050 * fpu_counter contains the number of consecutive context switches
1051 * that the FPU is used. If this is over a threshold, the lazy fpu
1052 * saving becomes unlazy to save the trap. This is an unsigned char
1053 * so that after 256 times the counter wraps and the behavior turns
1054 * lazy again; this to deal with bursty apps that only use FPU for
1055 * a short time
1056 */
1057 unsigned char fpu_counter;
1058#ifdef CONFIG_BLK_DEV_IO_TRACE 1074#ifdef CONFIG_BLK_DEV_IO_TRACE
1059 unsigned int btrace_seq; 1075 unsigned int btrace_seq;
1060#endif 1076#endif
@@ -1324,10 +1340,41 @@ struct task_struct {
1324#endif 1340#endif
1325#ifdef CONFIG_NUMA_BALANCING 1341#ifdef CONFIG_NUMA_BALANCING
1326 int numa_scan_seq; 1342 int numa_scan_seq;
1327 int numa_migrate_seq;
1328 unsigned int numa_scan_period; 1343 unsigned int numa_scan_period;
1344 unsigned int numa_scan_period_max;
1345 int numa_preferred_nid;
1346 int numa_migrate_deferred;
1347 unsigned long numa_migrate_retry;
1329 u64 node_stamp; /* migration stamp */ 1348 u64 node_stamp; /* migration stamp */
1330 struct callback_head numa_work; 1349 struct callback_head numa_work;
1350
1351 struct list_head numa_entry;
1352 struct numa_group *numa_group;
1353
1354 /*
1355 * Exponential decaying average of faults on a per-node basis.
1356 * Scheduling placement decisions are made based on the these counts.
1357 * The values remain static for the duration of a PTE scan
1358 */
1359 unsigned long *numa_faults;
1360 unsigned long total_numa_faults;
1361
1362 /*
1363 * numa_faults_buffer records faults per node during the current
1364 * scan window. When the scan completes, the counts in numa_faults
1365 * decay and these values are copied.
1366 */
1367 unsigned long *numa_faults_buffer;
1368
1369 /*
1370 * numa_faults_locality tracks if faults recorded during the last
1371 * scan window were remote/local. The task scan period is adapted
1372 * based on the locality of the faults with different weights
1373 * depending on whether they were shared or private faults
1374 */
1375 unsigned long numa_faults_locality[2];
1376
1377 unsigned long numa_pages_migrated;
1331#endif /* CONFIG_NUMA_BALANCING */ 1378#endif /* CONFIG_NUMA_BALANCING */
1332 1379
1333 struct rcu_head rcu; 1380 struct rcu_head rcu;
@@ -1412,16 +1459,33 @@ struct task_struct {
1412/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1459/* Future-safe accessor for struct task_struct's cpus_allowed. */
1413#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) 1460#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1414 1461
1462#define TNF_MIGRATED 0x01
1463#define TNF_NO_GROUP 0x02
1464#define TNF_SHARED 0x04
1465#define TNF_FAULT_LOCAL 0x08
1466
1415#ifdef CONFIG_NUMA_BALANCING 1467#ifdef CONFIG_NUMA_BALANCING
1416extern void task_numa_fault(int node, int pages, bool migrated); 1468extern void task_numa_fault(int last_node, int node, int pages, int flags);
1469extern pid_t task_numa_group_id(struct task_struct *p);
1417extern void set_numabalancing_state(bool enabled); 1470extern void set_numabalancing_state(bool enabled);
1471extern void task_numa_free(struct task_struct *p);
1472
1473extern unsigned int sysctl_numa_balancing_migrate_deferred;
1418#else 1474#else
1419static inline void task_numa_fault(int node, int pages, bool migrated) 1475static inline void task_numa_fault(int last_node, int node, int pages,
1476 int flags)
1420{ 1477{
1421} 1478}
1479static inline pid_t task_numa_group_id(struct task_struct *p)
1480{
1481 return 0;
1482}
1422static inline void set_numabalancing_state(bool enabled) 1483static inline void set_numabalancing_state(bool enabled)
1423{ 1484{
1424} 1485}
1486static inline void task_numa_free(struct task_struct *p)
1487{
1488}
1425#endif 1489#endif
1426 1490
1427static inline struct pid *task_pid(struct task_struct *task) 1491static inline struct pid *task_pid(struct task_struct *task)
@@ -1974,7 +2038,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
1974#else 2038#else
1975 static inline void kick_process(struct task_struct *tsk) { } 2039 static inline void kick_process(struct task_struct *tsk) { }
1976#endif 2040#endif
1977extern void sched_fork(struct task_struct *p); 2041extern void sched_fork(unsigned long clone_flags, struct task_struct *p);
1978extern void sched_dead(struct task_struct *p); 2042extern void sched_dead(struct task_struct *p);
1979 2043
1980extern void proc_caches_init(void); 2044extern void proc_caches_init(void);
@@ -2401,11 +2465,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
2401 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); 2465 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2402} 2466}
2403 2467
2404static inline int need_resched(void)
2405{
2406 return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2407}
2408
2409/* 2468/*
2410 * cond_resched() and cond_resched_lock(): latency reduction via 2469 * cond_resched() and cond_resched_lock(): latency reduction via
2411 * explicit rescheduling in places that are safe. The return 2470 * explicit rescheduling in places that are safe. The return
@@ -2474,36 +2533,105 @@ static inline int tsk_is_polling(struct task_struct *p)
2474{ 2533{
2475 return task_thread_info(p)->status & TS_POLLING; 2534 return task_thread_info(p)->status & TS_POLLING;
2476} 2535}
2477static inline void current_set_polling(void) 2536static inline void __current_set_polling(void)
2478{ 2537{
2479 current_thread_info()->status |= TS_POLLING; 2538 current_thread_info()->status |= TS_POLLING;
2480} 2539}
2481 2540
2482static inline void current_clr_polling(void) 2541static inline bool __must_check current_set_polling_and_test(void)
2542{
2543 __current_set_polling();
2544
2545 /*
2546 * Polling state must be visible before we test NEED_RESCHED,
2547 * paired by resched_task()
2548 */
2549 smp_mb();
2550
2551 return unlikely(tif_need_resched());
2552}
2553
2554static inline void __current_clr_polling(void)
2483{ 2555{
2484 current_thread_info()->status &= ~TS_POLLING; 2556 current_thread_info()->status &= ~TS_POLLING;
2485 smp_mb__after_clear_bit(); 2557}
2558
2559static inline bool __must_check current_clr_polling_and_test(void)
2560{
2561 __current_clr_polling();
2562
2563 /*
2564 * Polling state must be visible before we test NEED_RESCHED,
2565 * paired by resched_task()
2566 */
2567 smp_mb();
2568
2569 return unlikely(tif_need_resched());
2486} 2570}
2487#elif defined(TIF_POLLING_NRFLAG) 2571#elif defined(TIF_POLLING_NRFLAG)
2488static inline int tsk_is_polling(struct task_struct *p) 2572static inline int tsk_is_polling(struct task_struct *p)
2489{ 2573{
2490 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); 2574 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2491} 2575}
2492static inline void current_set_polling(void) 2576
2577static inline void __current_set_polling(void)
2493{ 2578{
2494 set_thread_flag(TIF_POLLING_NRFLAG); 2579 set_thread_flag(TIF_POLLING_NRFLAG);
2495} 2580}
2496 2581
2497static inline void current_clr_polling(void) 2582static inline bool __must_check current_set_polling_and_test(void)
2583{
2584 __current_set_polling();
2585
2586 /*
2587 * Polling state must be visible before we test NEED_RESCHED,
2588 * paired by resched_task()
2589 *
2590 * XXX: assumes set/clear bit are identical barrier wise.
2591 */
2592 smp_mb__after_clear_bit();
2593
2594 return unlikely(tif_need_resched());
2595}
2596
2597static inline void __current_clr_polling(void)
2498{ 2598{
2499 clear_thread_flag(TIF_POLLING_NRFLAG); 2599 clear_thread_flag(TIF_POLLING_NRFLAG);
2500} 2600}
2601
2602static inline bool __must_check current_clr_polling_and_test(void)
2603{
2604 __current_clr_polling();
2605
2606 /*
2607 * Polling state must be visible before we test NEED_RESCHED,
2608 * paired by resched_task()
2609 */
2610 smp_mb__after_clear_bit();
2611
2612 return unlikely(tif_need_resched());
2613}
2614
2501#else 2615#else
2502static inline int tsk_is_polling(struct task_struct *p) { return 0; } 2616static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2503static inline void current_set_polling(void) { } 2617static inline void __current_set_polling(void) { }
2504static inline void current_clr_polling(void) { } 2618static inline void __current_clr_polling(void) { }
2619
2620static inline bool __must_check current_set_polling_and_test(void)
2621{
2622 return unlikely(tif_need_resched());
2623}
2624static inline bool __must_check current_clr_polling_and_test(void)
2625{
2626 return unlikely(tif_need_resched());
2627}
2505#endif 2628#endif
2506 2629
2630static __always_inline bool need_resched(void)
2631{
2632 return unlikely(tif_need_resched());
2633}
2634
2507/* 2635/*
2508 * Thread group CPU time accounting. 2636 * Thread group CPU time accounting.
2509 */ 2637 */
@@ -2545,6 +2673,11 @@ static inline unsigned int task_cpu(const struct task_struct *p)
2545 return task_thread_info(p)->cpu; 2673 return task_thread_info(p)->cpu;
2546} 2674}
2547 2675
2676static inline int task_node(const struct task_struct *p)
2677{
2678 return cpu_to_node(task_cpu(p));
2679}
2680
2548extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 2681extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2549 2682
2550#else 2683#else
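A rough sketch, under the stated assumptions, of the idle-poll pattern the new current_set_polling_and_test()/__current_clr_polling() helpers target; these are scheduler-internal primitives, and demo_poll_idle plus its busy-wait body are purely illustrative:

#include <linux/sched.h>
#include <asm/processor.h>

/* Advertise polling so remote CPUs can skip the resched IPI, then spin
 * until a reschedule is actually required. */
static void demo_poll_idle(void)
{
	if (!current_set_polling_and_test()) {
		while (!need_resched())
			cpu_relax();
	}
	__current_clr_polling();
}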
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index bf8086b2506e..41467f8ff8ec 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -2,8 +2,8 @@
2#define _SCHED_SYSCTL_H 2#define _SCHED_SYSCTL_H
3 3
4#ifdef CONFIG_DETECT_HUNG_TASK 4#ifdef CONFIG_DETECT_HUNG_TASK
5extern int sysctl_hung_task_check_count;
5extern unsigned int sysctl_hung_task_panic; 6extern unsigned int sysctl_hung_task_panic;
6extern unsigned long sysctl_hung_task_check_count;
7extern unsigned long sysctl_hung_task_timeout_secs; 7extern unsigned long sysctl_hung_task_timeout_secs;
8extern unsigned long sysctl_hung_task_warnings; 8extern unsigned long sysctl_hung_task_warnings;
9extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 9extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
@@ -47,7 +47,6 @@ extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
47extern unsigned int sysctl_numa_balancing_scan_delay; 47extern unsigned int sysctl_numa_balancing_scan_delay;
48extern unsigned int sysctl_numa_balancing_scan_period_min; 48extern unsigned int sysctl_numa_balancing_scan_period_min;
49extern unsigned int sysctl_numa_balancing_scan_period_max; 49extern unsigned int sysctl_numa_balancing_scan_period_max;
50extern unsigned int sysctl_numa_balancing_scan_period_reset;
51extern unsigned int sysctl_numa_balancing_scan_size; 50extern unsigned int sysctl_numa_balancing_scan_size;
52extern unsigned int sysctl_numa_balancing_settle_count; 51extern unsigned int sysctl_numa_balancing_settle_count;
53 52
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index fa7922c80a41..cddf0c2940b6 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -15,7 +15,7 @@ static inline void sched_clock_postinit(void) { }
15#endif 15#endif
16 16
17extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); 17extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
18 18extern void sched_clock_register(u64 (*read)(void), int bits,
19extern unsigned long long (*sched_clock_func)(void); 19 unsigned long rate);
20 20
21#endif 21#endif
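A small sketch of the new 64-bit sched_clock_register() hook; demo_counter_base and its readl_relaxed() accessor stand in for whatever free-running counter a platform actually exposes:

#include <linux/io.h>
#include <linux/init.h>
#include <linux/sched_clock.h>

static void __iomem *demo_counter_base;

static u64 notrace demo_read_counter(void)
{
	return readl_relaxed(demo_counter_base);
}

static void __init demo_sched_clock_init(void)
{
	/* A 32-bit counter ticking at 24 MHz. */
	sched_clock_register(demo_read_counter, 32, 24000000);
}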
diff --git a/include/linux/security.h b/include/linux/security.h
index 9d37e2b9d3ec..5623a7f965b7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1052 * @xfrm_policy_delete_security: 1052 * @xfrm_policy_delete_security:
1053 * @ctx contains the xfrm_sec_ctx. 1053 * @ctx contains the xfrm_sec_ctx.
1054 * Authorize deletion of xp->security. 1054 * Authorize deletion of xp->security.
1055 * @xfrm_state_alloc_security: 1055 * @xfrm_state_alloc:
1056 * @x contains the xfrm_state being added to the Security Association 1056 * @x contains the xfrm_state being added to the Security Association
1057 * Database by the XFRM system. 1057 * Database by the XFRM system.
1058 * @sec_ctx contains the security context information being provided by 1058 * @sec_ctx contains the security context information being provided by
1059 * the user-level SA generation program (e.g., setkey or racoon). 1059 * the user-level SA generation program (e.g., setkey or racoon).
1060 * @secid contains the secid from which to take the mls portion of the context.
1061 * Allocate a security structure to the x->security field; the security 1060 * Allocate a security structure to the x->security field; the security
1062 * field is initialized to NULL when the xfrm_state is allocated. Set the 1061 * field is initialized to NULL when the xfrm_state is allocated. Set the
1063 * context to correspond to either sec_ctx or polsec, with the mls portion 1062 * context to correspond to sec_ctx. Return 0 if operation was successful
1064 * taken from secid in the latter case. 1063 * (memory to allocate, legal context).
1065 * Return 0 if operation was successful (memory to allocate, legal context). 1064 * @xfrm_state_alloc_acquire:
1065 * @x contains the xfrm_state being added to the Security Association
1066 * Database by the XFRM system.
1067 * @polsec contains the policy's security context.
1068 * @secid contains the secid from which to take the mls portion of the
1069 * context.
1070 * Allocate a security structure to the x->security field; the security
1071 * field is initialized to NULL when the xfrm_state is allocated. Set the
1072 * context to correspond to secid. Return 0 if operation was successful
1073 * (memory to allocate, legal context).
1066 * @xfrm_state_free_security: 1074 * @xfrm_state_free_security:
1067 * @x contains the xfrm_state. 1075 * @x contains the xfrm_state.
1068 * Deallocate x->security. 1076 * Deallocate x->security.
@@ -1679,9 +1687,11 @@ struct security_operations {
1679 int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); 1687 int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
1680 void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); 1688 void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
1681 int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); 1689 int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
1682 int (*xfrm_state_alloc_security) (struct xfrm_state *x, 1690 int (*xfrm_state_alloc) (struct xfrm_state *x,
1683 struct xfrm_user_sec_ctx *sec_ctx, 1691 struct xfrm_user_sec_ctx *sec_ctx);
1684 u32 secid); 1692 int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
1693 struct xfrm_sec_ctx *polsec,
1694 u32 secid);
1685 void (*xfrm_state_free_security) (struct xfrm_state *x); 1695 void (*xfrm_state_free_security) (struct xfrm_state *x);
1686 int (*xfrm_state_delete_security) (struct xfrm_state *x); 1696 int (*xfrm_state_delete_security) (struct xfrm_state *x);
1687 int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); 1697 int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 4e32edc8f506..52e0097f61f0 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,6 +20,7 @@ struct seq_file {
20 size_t size; 20 size_t size;
21 size_t from; 21 size_t from;
22 size_t count; 22 size_t count;
23 size_t pad_until;
23 loff_t index; 24 loff_t index;
24 loff_t read_pos; 25 loff_t read_pos;
25 u64 version; 26 u64 version;
@@ -79,6 +80,20 @@ static inline void seq_commit(struct seq_file *m, int num)
79 } 80 }
80} 81}
81 82
83/**
84 * seq_setwidth - set padding width
85 * @m: the seq_file handle
86 * @size: the max number of bytes to pad.
87 *
88 * Call seq_setwidth() for setting max width, then call seq_printf() etc. and
89 * finally call seq_pad() to pad the remaining bytes.
90 */
91static inline void seq_setwidth(struct seq_file *m, size_t size)
92{
93 m->pad_until = m->count + size;
94}
95void seq_pad(struct seq_file *m, char c);
96
82char *mangle_path(char *s, const char *p, const char *esc); 97char *mangle_path(char *s, const char *p, const char *esc);
83int seq_open(struct file *, const struct seq_operations *); 98int seq_open(struct file *, const struct seq_operations *);
84ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 99ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
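A short sketch of the fixed-width output pair documented above; demo_show_row and its 16-byte column width are arbitrary:

#include <linux/seq_file.h>

/* Emit the value, then 'name' padded out to a 16-byte column, and
 * terminate the line via the pad character. */
static void demo_show_row(struct seq_file *m, const char *name, int val)
{
	seq_printf(m, "%d ", val);
	seq_setwidth(m, 16);
	seq_printf(m, "%s", name);
	seq_pad(m, '\n');
}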
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 21a209336e79..535f158977b9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -34,6 +34,7 @@
34 34
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/preempt.h> 36#include <linux/preempt.h>
37#include <linux/lockdep.h>
37#include <asm/processor.h> 38#include <asm/processor.h>
38 39
39/* 40/*
@@ -44,10 +45,50 @@
44 */ 45 */
45typedef struct seqcount { 46typedef struct seqcount {
46 unsigned sequence; 47 unsigned sequence;
48#ifdef CONFIG_DEBUG_LOCK_ALLOC
49 struct lockdep_map dep_map;
50#endif
47} seqcount_t; 51} seqcount_t;
48 52
49#define SEQCNT_ZERO { 0 } 53static inline void __seqcount_init(seqcount_t *s, const char *name,
50#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0) 54 struct lock_class_key *key)
55{
56 /*
57 * Make sure we are not reinitializing a held lock:
58 */
59 lockdep_init_map(&s->dep_map, name, key, 0);
60 s->sequence = 0;
61}
62
63#ifdef CONFIG_DEBUG_LOCK_ALLOC
64# define SEQCOUNT_DEP_MAP_INIT(lockname) \
65 .dep_map = { .name = #lockname } \
66
67# define seqcount_init(s) \
68 do { \
69 static struct lock_class_key __key; \
70 __seqcount_init((s), #s, &__key); \
71 } while (0)
72
73static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
74{
75 seqcount_t *l = (seqcount_t *)s;
76 unsigned long flags;
77
78 local_irq_save(flags);
79 seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
80 seqcount_release(&l->dep_map, 1, _RET_IP_);
81 local_irq_restore(flags);
82}
83
84#else
85# define SEQCOUNT_DEP_MAP_INIT(lockname)
86# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
87# define seqcount_lockdep_reader_access(x)
88#endif
89
90#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
91
51 92
52/** 93/**
53 * __read_seqcount_begin - begin a seq-read critical section (without barrier) 94 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
@@ -76,6 +117,22 @@ repeat:
76} 117}
77 118
78/** 119/**
120 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
121 * @s: pointer to seqcount_t
122 * Returns: count to be passed to read_seqcount_retry
123 *
124 * raw_read_seqcount_begin opens a read critical section of the given
125 * seqcount, but without any lockdep checking. Validity of the critical
126 * section is tested by checking read_seqcount_retry function.
127 */
128static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
129{
130 unsigned ret = __read_seqcount_begin(s);
131 smp_rmb();
132 return ret;
133}
134
135/**
79 * read_seqcount_begin - begin a seq-read critical section 136 * read_seqcount_begin - begin a seq-read critical section
80 * @s: pointer to seqcount_t 137 * @s: pointer to seqcount_t
81 * Returns: count to be passed to read_seqcount_retry 138 * Returns: count to be passed to read_seqcount_retry
@@ -86,9 +143,8 @@ repeat:
86 */ 143 */
87static inline unsigned read_seqcount_begin(const seqcount_t *s) 144static inline unsigned read_seqcount_begin(const seqcount_t *s)
88{ 145{
89 unsigned ret = __read_seqcount_begin(s); 146 seqcount_lockdep_reader_access(s);
90 smp_rmb(); 147 return raw_read_seqcount_begin(s);
91 return ret;
92} 148}
93 149
94/** 150/**
@@ -108,6 +164,8 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
108static inline unsigned raw_seqcount_begin(const seqcount_t *s) 164static inline unsigned raw_seqcount_begin(const seqcount_t *s)
109{ 165{
110 unsigned ret = ACCESS_ONCE(s->sequence); 166 unsigned ret = ACCESS_ONCE(s->sequence);
167
168 seqcount_lockdep_reader_access(s);
111 smp_rmb(); 169 smp_rmb();
112 return ret & ~1; 170 return ret & ~1;
113} 171}
@@ -148,20 +206,38 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
148} 206}
149 207
150 208
209
210static inline void raw_write_seqcount_begin(seqcount_t *s)
211{
212 s->sequence++;
213 smp_wmb();
214}
215
216static inline void raw_write_seqcount_end(seqcount_t *s)
217{
218 smp_wmb();
219 s->sequence++;
220}
221
151/* 222/*
152 * Sequence counter only version assumes that callers are using their 223 * Sequence counter only version assumes that callers are using their
153 * own mutexing. 224 * own mutexing.
154 */ 225 */
226static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
227{
228 raw_write_seqcount_begin(s);
229 seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
230}
231
155static inline void write_seqcount_begin(seqcount_t *s) 232static inline void write_seqcount_begin(seqcount_t *s)
156{ 233{
157 s->sequence++; 234 write_seqcount_begin_nested(s, 0);
158 smp_wmb();
159} 235}
160 236
161static inline void write_seqcount_end(seqcount_t *s) 237static inline void write_seqcount_end(seqcount_t *s)
162{ 238{
163 smp_wmb(); 239 seqcount_release(&s->dep_map, 1, _RET_IP_);
164 s->sequence++; 240 raw_write_seqcount_end(s);
165} 241}
166 242
167/** 243/**
@@ -188,7 +264,7 @@ typedef struct {
188 */ 264 */
189#define __SEQLOCK_UNLOCKED(lockname) \ 265#define __SEQLOCK_UNLOCKED(lockname) \
190 { \ 266 { \
191 .seqcount = SEQCNT_ZERO, \ 267 .seqcount = SEQCNT_ZERO(lockname), \
192 .lock = __SPIN_LOCK_UNLOCKED(lockname) \ 268 .lock = __SPIN_LOCK_UNLOCKED(lockname) \
193 } 269 }
194 270
@@ -289,6 +365,35 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
289 spin_unlock(&sl->lock); 365 spin_unlock(&sl->lock);
290} 366}
291 367
368/**
369 * read_seqbegin_or_lock - begin a sequence number check or locking block
370 * @lock: sequence lock
371 * @seq : sequence number to be checked
372 *
373 * First try it once optimistically without taking the lock. If that fails,
374 * take the lock. The sequence number is also used as a marker for deciding
375 * whether to be a reader (even) or writer (odd).
376 * N.B. seq must be initialized to an even number to begin with.
377 */
378static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
379{
380 if (!(*seq & 1)) /* Even */
381 *seq = read_seqbegin(lock);
382 else /* Odd */
383 read_seqlock_excl(lock);
384}
385
386static inline int need_seqretry(seqlock_t *lock, int seq)
387{
388 return !(seq & 1) && read_seqretry(lock, seq);
389}
390
391static inline void done_seqretry(seqlock_t *lock, int seq)
392{
393 if (seq & 1)
394 read_sequnlock_excl(lock);
395}
396
292static inline void read_seqlock_excl_bh(seqlock_t *sl) 397static inline void read_seqlock_excl_bh(seqlock_t *sl)
293{ 398{
294 spin_lock_bh(&sl->lock); 399 spin_lock_bh(&sl->lock);
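A sketch of the reader loop that read_seqbegin_or_lock(), need_seqretry() and done_seqretry() are built for: try a lockless pass first, then fall back to taking the lock on contention. demo_seqlock and demo_value are hypothetical:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_seqlock);
static int demo_value;

static int demo_read_value(void)
{
	int seq = 0;	/* even: first pass is a lockless read */
	int val;

retry:
	read_seqbegin_or_lock(&demo_seqlock, &seq);
	val = demo_value;
	if (need_seqretry(&demo_seqlock, seq)) {
		seq = 1;	/* odd: retry with the lock held */
		goto retry;
	}
	done_seqretry(&demo_seqlock, seq);

	return val;
}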
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index b98291ac7f14..f729be981da0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -66,7 +66,6 @@ struct uart_ops {
66 void (*set_ldisc)(struct uart_port *, int new); 66 void (*set_ldisc)(struct uart_port *, int new);
67 void (*pm)(struct uart_port *, unsigned int state, 67 void (*pm)(struct uart_port *, unsigned int state,
68 unsigned int oldstate); 68 unsigned int oldstate);
69 int (*set_wake)(struct uart_port *, unsigned int state);
70 69
71 /* 70 /*
72 * Return a string describing the type of the port 71 * Return a string describing the type of the port
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index d34049712a4d..50fe651da965 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -5,7 +5,7 @@
5#include <linux/sh_dma.h> 5#include <linux/sh_dma.h>
6 6
7/* 7/*
8 * Generic header for SuperH (H)SCI(F) (used by sh/sh64/h8300 and related parts) 8 * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts)
9 */ 9 */
10 10
11#define SCIx_NOT_SUPPORTED (-1) 11#define SCIx_NOT_SUPPORTED (-1)
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index fe817918b30e..d9b436f09925 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -59,6 +59,9 @@
59#ifndef _LINUX_SFI_H 59#ifndef _LINUX_SFI_H
60#define _LINUX_SFI_H 60#define _LINUX_SFI_H
61 61
62#include <linux/init.h>
63#include <linux/types.h>
64
62/* Table signatures reserved by the SFI specification */ 65/* Table signatures reserved by the SFI specification */
63#define SFI_SIG_SYST "SYST" 66#define SFI_SIG_SYST "SYST"
64#define SFI_SIG_FREQ "FREQ" 67#define SFI_SIG_FREQ "FREQ"
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 30aa0dc60d75..9d55438bc4ad 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -47,6 +47,8 @@ extern int shmem_init(void);
47extern int shmem_fill_super(struct super_block *sb, void *data, int silent); 47extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
48extern struct file *shmem_file_setup(const char *name, 48extern struct file *shmem_file_setup(const char *name,
49 loff_t size, unsigned long flags); 49 loff_t size, unsigned long flags);
50extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
51 unsigned long flags);
50extern int shmem_zero_setup(struct vm_area_struct *); 52extern int shmem_zero_setup(struct vm_area_struct *);
51extern int shmem_lock(struct file *file, int lock, struct user_struct *user); 53extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
52extern void shmem_unlock_mapping(struct address_space *mapping); 54extern void shmem_unlock_mapping(struct address_space *mapping);
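shmem_kernel_file_setup() added above takes the same arguments as shmem_file_setup() and is meant for files used only inside the kernel. A hedged usage sketch; the label string, size and surrounding function are invented:

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

/* Back an in-kernel buffer with an unlinked tmpfs file. */
static int use_backing_file(void)
{
	struct file *filp = shmem_kernel_file_setup("kernel-buf", SZ_1M, 0);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	/* ... populate or read pages via filp->f_mapping ... */

	fput(filp);
	return 0;
}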
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c2d89335f637..6f69b3f914fb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -318,9 +318,13 @@ enum {
318 318
319 SKB_GSO_GRE = 1 << 6, 319 SKB_GSO_GRE = 1 << 6,
320 320
321 SKB_GSO_UDP_TUNNEL = 1 << 7, 321 SKB_GSO_IPIP = 1 << 7,
322 322
323 SKB_GSO_MPLS = 1 << 8, 323 SKB_GSO_SIT = 1 << 8,
324
325 SKB_GSO_UDP_TUNNEL = 1 << 9,
326
327 SKB_GSO_MPLS = 1 << 10,
324}; 328};
325 329
326#if BITS_PER_LONG > 32 330#if BITS_PER_LONG > 32
@@ -333,11 +337,6 @@ typedef unsigned int sk_buff_data_t;
333typedef unsigned char *sk_buff_data_t; 337typedef unsigned char *sk_buff_data_t;
334#endif 338#endif
335 339
336#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
337 defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
338#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
339#endif
340
341/** 340/**
342 * struct sk_buff - socket buffer 341 * struct sk_buff - socket buffer
343 * @next: Next buffer in list 342 * @next: Next buffer in list
@@ -370,7 +369,6 @@ typedef unsigned char *sk_buff_data_t;
370 * @protocol: Packet protocol from driver 369 * @protocol: Packet protocol from driver
371 * @destructor: Destruct function 370 * @destructor: Destruct function
372 * @nfct: Associated connection, if any 371 * @nfct: Associated connection, if any
373 * @nfct_reasm: netfilter conntrack re-assembly pointer
374 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 372 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
375 * @skb_iif: ifindex of device we arrived on 373 * @skb_iif: ifindex of device we arrived on
376 * @tc_index: Traffic control index 374 * @tc_index: Traffic control index
@@ -459,9 +457,6 @@ struct sk_buff {
459#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 457#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
460 struct nf_conntrack *nfct; 458 struct nf_conntrack *nfct;
461#endif 459#endif
462#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
463 struct sk_buff *nfct_reasm;
464#endif
465#ifdef CONFIG_BRIDGE_NETFILTER 460#ifdef CONFIG_BRIDGE_NETFILTER
466 struct nf_bridge_info *nf_bridge; 461 struct nf_bridge_info *nf_bridge;
467#endif 462#endif
@@ -585,8 +580,8 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
585 skb->_skb_refdst = (unsigned long)dst; 580 skb->_skb_refdst = (unsigned long)dst;
586} 581}
587 582
588extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, 583void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
589 bool force); 584 bool force);
590 585
591/** 586/**
592 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference 587 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +629,20 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
634 return (struct rtable *)skb_dst(skb); 629 return (struct rtable *)skb_dst(skb);
635} 630}
636 631
637extern void kfree_skb(struct sk_buff *skb); 632void kfree_skb(struct sk_buff *skb);
638extern void kfree_skb_list(struct sk_buff *segs); 633void kfree_skb_list(struct sk_buff *segs);
639extern void skb_tx_error(struct sk_buff *skb); 634void skb_tx_error(struct sk_buff *skb);
640extern void consume_skb(struct sk_buff *skb); 635void consume_skb(struct sk_buff *skb);
641extern void __kfree_skb(struct sk_buff *skb); 636void __kfree_skb(struct sk_buff *skb);
642extern struct kmem_cache *skbuff_head_cache; 637extern struct kmem_cache *skbuff_head_cache;
643 638
644extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); 639void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
645extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 640bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
646 bool *fragstolen, int *delta_truesize); 641 bool *fragstolen, int *delta_truesize);
647 642
648extern struct sk_buff *__alloc_skb(unsigned int size, 643struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
649 gfp_t priority, int flags, int node); 644 int node);
650extern struct sk_buff *build_skb(void *data, unsigned int frag_size); 645struct sk_buff *build_skb(void *data, unsigned int frag_size);
651static inline struct sk_buff *alloc_skb(unsigned int size, 646static inline struct sk_buff *alloc_skb(unsigned int size,
652 gfp_t priority) 647 gfp_t priority)
653{ 648{
@@ -660,41 +655,33 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
660 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); 655 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
661} 656}
662 657
663extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node); 658struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
664static inline struct sk_buff *alloc_skb_head(gfp_t priority) 659static inline struct sk_buff *alloc_skb_head(gfp_t priority)
665{ 660{
666 return __alloc_skb_head(priority, -1); 661 return __alloc_skb_head(priority, -1);
667} 662}
668 663
669extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 664struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
670extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 665int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
671extern struct sk_buff *skb_clone(struct sk_buff *skb, 666struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
672 gfp_t priority); 667struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
673extern struct sk_buff *skb_copy(const struct sk_buff *skb, 668struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
674 gfp_t priority); 669
675extern struct sk_buff *__pskb_copy(struct sk_buff *skb, 670int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
676 int headroom, gfp_t gfp_mask); 671struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
677 672 unsigned int headroom);
678extern int pskb_expand_head(struct sk_buff *skb, 673struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
679 int nhead, int ntail, 674 int newtailroom, gfp_t priority);
680 gfp_t gfp_mask); 675int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
681extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 676 int len);
682 unsigned int headroom); 677int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
683extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 678int skb_pad(struct sk_buff *skb, int pad);
684 int newheadroom, int newtailroom,
685 gfp_t priority);
686extern int skb_to_sgvec(struct sk_buff *skb,
687 struct scatterlist *sg, int offset,
688 int len);
689extern int skb_cow_data(struct sk_buff *skb, int tailbits,
690 struct sk_buff **trailer);
691extern int skb_pad(struct sk_buff *skb, int pad);
692#define dev_kfree_skb(a) consume_skb(a) 679#define dev_kfree_skb(a) consume_skb(a)
693 680
694extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 681int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
695 int getfrag(void *from, char *to, int offset, 682 int getfrag(void *from, char *to, int offset,
696 int len,int odd, struct sk_buff *skb), 683 int len, int odd, struct sk_buff *skb),
697 void *from, int length); 684 void *from, int length);
698 685
699struct skb_seq_state { 686struct skb_seq_state {
700 __u32 lower_offset; 687 __u32 lower_offset;
@@ -706,18 +693,17 @@ struct skb_seq_state {
706 __u8 *frag_data; 693 __u8 *frag_data;
707}; 694};
708 695
709extern void skb_prepare_seq_read(struct sk_buff *skb, 696void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
710 unsigned int from, unsigned int to, 697 unsigned int to, struct skb_seq_state *st);
711 struct skb_seq_state *st); 698unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
712extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 699 struct skb_seq_state *st);
713 struct skb_seq_state *st); 700void skb_abort_seq_read(struct skb_seq_state *st);
714extern void skb_abort_seq_read(struct skb_seq_state *st);
715 701
716extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 702unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
717 unsigned int to, struct ts_config *config, 703 unsigned int to, struct ts_config *config,
718 struct ts_state *state); 704 struct ts_state *state);
719 705
720extern void __skb_get_rxhash(struct sk_buff *skb); 706void __skb_get_rxhash(struct sk_buff *skb);
721static inline __u32 skb_get_rxhash(struct sk_buff *skb) 707static inline __u32 skb_get_rxhash(struct sk_buff *skb)
722{ 708{
723 if (!skb->l4_rxhash) 709 if (!skb->l4_rxhash)
@@ -1095,7 +1081,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1095 * The "__skb_xxxx()" functions are the non-atomic ones that 1081 * The "__skb_xxxx()" functions are the non-atomic ones that
1096 * can only be called with interrupts disabled. 1082 * can only be called with interrupts disabled.
1097 */ 1083 */
1098extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); 1084void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1085 struct sk_buff_head *list);
1099static inline void __skb_insert(struct sk_buff *newsk, 1086static inline void __skb_insert(struct sk_buff *newsk,
1100 struct sk_buff *prev, struct sk_buff *next, 1087 struct sk_buff *prev, struct sk_buff *next,
1101 struct sk_buff_head *list) 1088 struct sk_buff_head *list)
@@ -1201,8 +1188,8 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
1201 __skb_insert(newsk, prev, prev->next, list); 1188 __skb_insert(newsk, prev, prev->next, list);
1202} 1189}
1203 1190
1204extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, 1191void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1205 struct sk_buff_head *list); 1192 struct sk_buff_head *list);
1206 1193
1207static inline void __skb_queue_before(struct sk_buff_head *list, 1194static inline void __skb_queue_before(struct sk_buff_head *list,
1208 struct sk_buff *next, 1195 struct sk_buff *next,
@@ -1221,7 +1208,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
1221 * 1208 *
1222 * A buffer cannot be placed on two lists at the same time. 1209 * A buffer cannot be placed on two lists at the same time.
1223 */ 1210 */
1224extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); 1211void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1225static inline void __skb_queue_head(struct sk_buff_head *list, 1212static inline void __skb_queue_head(struct sk_buff_head *list,
1226 struct sk_buff *newsk) 1213 struct sk_buff *newsk)
1227{ 1214{
@@ -1238,7 +1225,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
1238 * 1225 *
1239 * A buffer cannot be placed on two lists at the same time. 1226 * A buffer cannot be placed on two lists at the same time.
1240 */ 1227 */
1241extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); 1228void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1242static inline void __skb_queue_tail(struct sk_buff_head *list, 1229static inline void __skb_queue_tail(struct sk_buff_head *list,
1243 struct sk_buff *newsk) 1230 struct sk_buff *newsk)
1244{ 1231{
@@ -1249,7 +1236,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
1249 * remove sk_buff from list. _Must_ be called atomically, and with 1236 * remove sk_buff from list. _Must_ be called atomically, and with
1250 * the list known.. 1237 * the list known..
1251 */ 1238 */
1252extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); 1239void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1253static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 1240static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1254{ 1241{
1255 struct sk_buff *next, *prev; 1242 struct sk_buff *next, *prev;
@@ -1270,7 +1257,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1270 * so must be used with appropriate locks held only. The head item is 1257 * so must be used with appropriate locks held only. The head item is
1271 * returned or %NULL if the list is empty. 1258 * returned or %NULL if the list is empty.
1272 */ 1259 */
1273extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); 1260struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1274static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) 1261static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1275{ 1262{
1276 struct sk_buff *skb = skb_peek(list); 1263 struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1274,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1287 * so must be used with appropriate locks held only. The tail item is 1274 * so must be used with appropriate locks held only. The tail item is
1288 * returned or %NULL if the list is empty. 1275 * returned or %NULL if the list is empty.
1289 */ 1276 */
1290extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); 1277struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1291static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) 1278static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1292{ 1279{
1293 struct sk_buff *skb = skb_peek_tail(list); 1280 struct sk_buff *skb = skb_peek_tail(list);
@@ -1361,7 +1348,7 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1361 * @size: the length of the data 1348 * @size: the length of the data
1362 * 1349 *
1363 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of 1350 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
1364 * @skb to point to &size bytes at offset @off within @page. In 1351 * @skb to point to @size bytes at offset @off within @page. In
1365 * addition updates @skb such that @i is the last fragment. 1352 * addition updates @skb such that @i is the last fragment.
1366 * 1353 *
1367 * Does not take any additional reference on the fragment. 1354 * Does not take any additional reference on the fragment.
@@ -1373,8 +1360,11 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1373 skb_shinfo(skb)->nr_frags = i + 1; 1360 skb_shinfo(skb)->nr_frags = i + 1;
1374} 1361}
1375 1362
1376extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, 1363void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1377 int off, int size, unsigned int truesize); 1364 int size, unsigned int truesize);
1365
1366void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1367 unsigned int truesize);
1378 1368
1379#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) 1369#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1380#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) 1370#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1408,8 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1418/* 1408/*
1419 * Add data to an sk_buff 1409 * Add data to an sk_buff
1420 */ 1410 */
1421extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len); 1411unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
1412unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1422static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) 1413static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1423{ 1414{
1424 unsigned char *tmp = skb_tail_pointer(skb); 1415 unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1419,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1428 return tmp; 1419 return tmp;
1429} 1420}
1430 1421
1431extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len); 1422unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1432static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) 1423static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1433{ 1424{
1434 skb->data -= len; 1425 skb->data -= len;
@@ -1436,7 +1427,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1436 return skb->data; 1427 return skb->data;
1437} 1428}
1438 1429
1439extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); 1430unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1440static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) 1431static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1441{ 1432{
1442 skb->len -= len; 1433 skb->len -= len;
@@ -1449,7 +1440,7 @@ static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int l
1449 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); 1440 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1450} 1441}
1451 1442
1452extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); 1443unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1453 1444
1454static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) 1445static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1455{ 1446{
@@ -1647,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1647 skb->mac_header += offset; 1638 skb->mac_header += offset;
1648} 1639}
1649 1640
1641static inline void skb_pop_mac_header(struct sk_buff *skb)
1642{
1643 skb->mac_header = skb->network_header;
1644}
1645
1650static inline void skb_probe_transport_header(struct sk_buff *skb, 1646static inline void skb_probe_transport_header(struct sk_buff *skb,
1651 const int offset_hint) 1647 const int offset_hint)
1652{ 1648{
@@ -1753,7 +1749,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1753#define NET_SKB_PAD max(32, L1_CACHE_BYTES) 1749#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
1754#endif 1750#endif
1755 1751
1756extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1752int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1757 1753
1758static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 1754static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1759{ 1755{
@@ -1765,7 +1761,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1765 skb_set_tail_pointer(skb, len); 1761 skb_set_tail_pointer(skb, len);
1766} 1762}
1767 1763
1768extern void skb_trim(struct sk_buff *skb, unsigned int len); 1764void skb_trim(struct sk_buff *skb, unsigned int len);
1769 1765
1770static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 1766static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1771{ 1767{
@@ -1838,7 +1834,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
1838 * the list and one reference dropped. This function does not take the 1834 * the list and one reference dropped. This function does not take the
1839 * list lock and the caller must hold the relevant locks to use it. 1835 * list lock and the caller must hold the relevant locks to use it.
1840 */ 1836 */
1841extern void skb_queue_purge(struct sk_buff_head *list); 1837void skb_queue_purge(struct sk_buff_head *list);
1842static inline void __skb_queue_purge(struct sk_buff_head *list) 1838static inline void __skb_queue_purge(struct sk_buff_head *list)
1843{ 1839{
1844 struct sk_buff *skb; 1840 struct sk_buff *skb;
@@ -1850,11 +1846,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
1850#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) 1846#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
1851#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE 1847#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
1852 1848
1853extern void *netdev_alloc_frag(unsigned int fragsz); 1849void *netdev_alloc_frag(unsigned int fragsz);
1854 1850
1855extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 1851struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
1856 unsigned int length, 1852 gfp_t gfp_mask);
1857 gfp_t gfp_mask);
1858 1853
1859/** 1854/**
1860 * netdev_alloc_skb - allocate an skbuff for rx on a specific device 1855 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2071,6 +2066,8 @@ static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2071 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); 2066 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2072} 2067}
2073 2068
2069bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2070
2074/** 2071/**
2075 * skb_frag_dma_map - maps a paged fragment via the DMA API 2072 * skb_frag_dma_map - maps a paged fragment via the DMA API
2076 * @dev: the device to map the fragment to 2073 * @dev: the device to map the fragment to
@@ -2342,60 +2339,49 @@ static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2342#define skb_walk_frags(skb, iter) \ 2339#define skb_walk_frags(skb, iter) \
2343 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 2340 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2344 2341
2345extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 2342struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2346 int *peeked, int *off, int *err); 2343 int *peeked, int *off, int *err);
2347extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 2344struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
2348 int noblock, int *err); 2345 int *err);
2349extern unsigned int datagram_poll(struct file *file, struct socket *sock, 2346unsigned int datagram_poll(struct file *file, struct socket *sock,
2350 struct poll_table_struct *wait); 2347 struct poll_table_struct *wait);
2351extern int skb_copy_datagram_iovec(const struct sk_buff *from, 2348int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
2352 int offset, struct iovec *to, 2349 struct iovec *to, int size);
2353 int size); 2350int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
2354extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, 2351 struct iovec *iov);
2355 int hlen, 2352int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
2356 struct iovec *iov); 2353 const struct iovec *from, int from_offset,
2357extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, 2354 int len);
2358 int offset, 2355int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
2359 const struct iovec *from, 2356 int offset, size_t count);
2360 int from_offset, 2357int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
2361 int len); 2358 const struct iovec *to, int to_offset,
2362extern int zerocopy_sg_from_iovec(struct sk_buff *skb, 2359 int size);
2363 const struct iovec *frm, 2360void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2364 int offset, 2361void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
2365 size_t count); 2362int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
2366extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, 2363int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2367 int offset, 2364int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
2368 const struct iovec *to, 2365__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
2369 int to_offset, 2366 int len, __wsum csum);
2370 int size); 2367int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
2371extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2368 struct pipe_inode_info *pipe, unsigned int len,
2372extern void skb_free_datagram_locked(struct sock *sk, 2369 unsigned int flags);
2373 struct sk_buff *skb); 2370void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2374extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, 2371void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2375 unsigned int flags); 2372int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2376extern __wsum skb_checksum(const struct sk_buff *skb, int offset, 2373void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2377 int len, __wsum csum); 2374struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2378extern int skb_copy_bits(const struct sk_buff *skb, int offset, 2375
2379 void *to, int len); 2376struct skb_checksum_ops {
2380extern int skb_store_bits(struct sk_buff *skb, int offset, 2377 __wsum (*update)(const void *mem, int len, __wsum wsum);
2381 const void *from, int len); 2378 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
2382extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, 2379};
2383 int offset, u8 *to, int len, 2380
2384 __wsum csum); 2381__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2385extern int skb_splice_bits(struct sk_buff *skb, 2382 __wsum csum, const struct skb_checksum_ops *ops);
2386 unsigned int offset, 2383__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2387 struct pipe_inode_info *pipe, 2384 __wsum csum);
2388 unsigned int len,
2389 unsigned int flags);
2390extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2391extern void skb_split(struct sk_buff *skb,
2392 struct sk_buff *skb1, const u32 len);
2393extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2394 int shiftlen);
2395extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2396
2397extern struct sk_buff *skb_segment(struct sk_buff *skb,
2398 netdev_features_t features);
2399 2385
2400static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2386static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2401 int len, void *buffer) 2387 int len, void *buffer)
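The new struct skb_checksum_ops makes the walk in __skb_checksum() pluggable. A hedged sketch that simply reproduces a plain one's-complement sum using the long-standing csum_partial()/csum_block_add() helpers; all my_* names are invented:

#include <linux/skbuff.h>
#include <net/checksum.h>

static __wsum my_update(const void *mem, int len, __wsum wsum)
{
	return csum_partial(mem, len, wsum);
}

static __wsum my_combine(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}

static const struct skb_checksum_ops my_csum_ops = {
	.update  = my_update,
	.combine = my_combine,
};

/* Checksum the first 'len' bytes of skb data. */
static __wsum my_skb_csum(const struct sk_buff *skb, int len)
{
	return __skb_checksum(skb, 0, len, 0, &my_csum_ops);
}

A real user would supply its own update/combine callbacks here; the ops structure lets callers do that without duplicating the fragment-walking code.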
@@ -2440,7 +2426,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
2440 memcpy(skb->data + offset, from, len); 2426 memcpy(skb->data + offset, from, len);
2441} 2427}
2442 2428
2443extern void skb_init(void); 2429void skb_init(void);
2444 2430
2445static inline ktime_t skb_get_ktime(const struct sk_buff *skb) 2431static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
2446{ 2432{
@@ -2483,12 +2469,12 @@ static inline ktime_t net_invalid_timestamp(void)
2483 return ktime_set(0, 0); 2469 return ktime_set(0, 0);
2484} 2470}
2485 2471
2486extern void skb_timestamping_init(void); 2472void skb_timestamping_init(void);
2487 2473
2488#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 2474#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2489 2475
2490extern void skb_clone_tx_timestamp(struct sk_buff *skb); 2476void skb_clone_tx_timestamp(struct sk_buff *skb);
2491extern bool skb_defer_rx_timestamp(struct sk_buff *skb); 2477bool skb_defer_rx_timestamp(struct sk_buff *skb);
2492 2478
2493#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ 2479#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
2494 2480
@@ -2529,8 +2515,8 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
2529 * generates a software time stamp (otherwise), then queues the clone 2515 * generates a software time stamp (otherwise), then queues the clone
2530 * to the error queue of the socket. Errors are silently ignored. 2516 * to the error queue of the socket. Errors are silently ignored.
2531 */ 2517 */
2532extern void skb_tstamp_tx(struct sk_buff *orig_skb, 2518void skb_tstamp_tx(struct sk_buff *orig_skb,
2533 struct skb_shared_hwtstamps *hwtstamps); 2519 struct skb_shared_hwtstamps *hwtstamps);
2534 2520
2535static inline void sw_tx_timestamp(struct sk_buff *skb) 2521static inline void sw_tx_timestamp(struct sk_buff *skb)
2536{ 2522{
@@ -2545,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
2545 * Ethernet MAC Drivers should call this function in their hard_xmit() 2531 * Ethernet MAC Drivers should call this function in their hard_xmit()
2546 * function immediately before giving the sk_buff to the MAC hardware. 2532 * function immediately before giving the sk_buff to the MAC hardware.
2547 * 2533 *
2534 * Specifically, one should make absolutely sure that this function is
2535 * called before TX completion of this packet can trigger. Otherwise
2536 * the packet could potentially already be freed.
2537 *
2548 * @skb: A socket buffer. 2538 * @skb: A socket buffer.
2549 */ 2539 */
2550static inline void skb_tx_timestamp(struct sk_buff *skb) 2540static inline void skb_tx_timestamp(struct sk_buff *skb)
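The added wording above is about ordering against TX completion. A sketch of where the call sits in a hypothetical driver's xmit routine; mydrv_start_xmit() and hw_queue_skb() are invented stand-ins:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stand-in for posting the descriptor to real hardware. */
static void hw_queue_skb(struct sk_buff *skb)
{
	/* write descriptor, ring doorbell, ... */
}

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/*
	 * Timestamp before handing the skb to hardware: once it is queued,
	 * TX completion may run and free the skb at any moment.
	 */
	skb_tx_timestamp(skb);
	hw_queue_skb(skb);

	return NETDEV_TX_OK;
}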
@@ -2562,8 +2552,8 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
2562 */ 2552 */
2563void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); 2553void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2564 2554
2565extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 2555__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2566extern __sum16 __skb_checksum_complete(struct sk_buff *skb); 2556__sum16 __skb_checksum_complete(struct sk_buff *skb);
2567 2557
2568static inline int skb_csum_unnecessary(const struct sk_buff *skb) 2558static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2569{ 2559{
@@ -2593,7 +2583,7 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2593} 2583}
2594 2584
2595#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2585#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2596extern void nf_conntrack_destroy(struct nf_conntrack *nfct); 2586void nf_conntrack_destroy(struct nf_conntrack *nfct);
2597static inline void nf_conntrack_put(struct nf_conntrack *nfct) 2587static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2598{ 2588{
2599 if (nfct && atomic_dec_and_test(&nfct->use)) 2589 if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2605,18 +2595,6 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2605 atomic_inc(&nfct->use); 2595 atomic_inc(&nfct->use);
2606} 2596}
2607#endif 2597#endif
2608#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2609static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
2610{
2611 if (skb)
2612 atomic_inc(&skb->users);
2613}
2614static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
2615{
2616 if (skb)
2617 kfree_skb(skb);
2618}
2619#endif
2620#ifdef CONFIG_BRIDGE_NETFILTER 2598#ifdef CONFIG_BRIDGE_NETFILTER
2621static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) 2599static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2622{ 2600{
@@ -2635,10 +2613,6 @@ static inline void nf_reset(struct sk_buff *skb)
2635 nf_conntrack_put(skb->nfct); 2613 nf_conntrack_put(skb->nfct);
2636 skb->nfct = NULL; 2614 skb->nfct = NULL;
2637#endif 2615#endif
2638#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2639 nf_conntrack_put_reasm(skb->nfct_reasm);
2640 skb->nfct_reasm = NULL;
2641#endif
2642#ifdef CONFIG_BRIDGE_NETFILTER 2616#ifdef CONFIG_BRIDGE_NETFILTER
2643 nf_bridge_put(skb->nf_bridge); 2617 nf_bridge_put(skb->nf_bridge);
2644 skb->nf_bridge = NULL; 2618 skb->nf_bridge = NULL;
@@ -2660,10 +2634,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2660 nf_conntrack_get(src->nfct); 2634 nf_conntrack_get(src->nfct);
2661 dst->nfctinfo = src->nfctinfo; 2635 dst->nfctinfo = src->nfctinfo;
2662#endif 2636#endif
2663#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2664 dst->nfct_reasm = src->nfct_reasm;
2665 nf_conntrack_get_reasm(src->nfct_reasm);
2666#endif
2667#ifdef CONFIG_BRIDGE_NETFILTER 2637#ifdef CONFIG_BRIDGE_NETFILTER
2668 dst->nf_bridge = src->nf_bridge; 2638 dst->nf_bridge = src->nf_bridge;
2669 nf_bridge_get(src->nf_bridge); 2639 nf_bridge_get(src->nf_bridge);
@@ -2675,9 +2645,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2675#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2645#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2676 nf_conntrack_put(dst->nfct); 2646 nf_conntrack_put(dst->nfct);
2677#endif 2647#endif
2678#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2679 nf_conntrack_put_reasm(dst->nfct_reasm);
2680#endif
2681#ifdef CONFIG_BRIDGE_NETFILTER 2648#ifdef CONFIG_BRIDGE_NETFILTER
2682 nf_bridge_put(dst->nf_bridge); 2649 nf_bridge_put(dst->nf_bridge);
2683#endif 2650#endif
@@ -2732,28 +2699,27 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2732 return skb->queue_mapping != 0; 2699 return skb->queue_mapping != 0;
2733} 2700}
2734 2701
2735extern u16 __skb_tx_hash(const struct net_device *dev, 2702u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2736 const struct sk_buff *skb, 2703 unsigned int num_tx_queues);
2737 unsigned int num_tx_queues);
2738 2704
2739#ifdef CONFIG_XFRM
2740static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 2705static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2741{ 2706{
2707#ifdef CONFIG_XFRM
2742 return skb->sp; 2708 return skb->sp;
2743}
2744#else 2709#else
2745static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2746{
2747 return NULL; 2710 return NULL;
2748}
2749#endif 2711#endif
2712}
2750 2713
2751/* Keeps track of mac header offset relative to skb->head. 2714/* Keeps track of mac header offset relative to skb->head.
2752 * It is useful for TSO of Tunneling protocol. e.g. GRE. 2715 * It is useful for TSO of Tunneling protocol. e.g. GRE.
2753 * For non-tunnel skb it points to skb_mac_header() and for 2716 * For non-tunnel skb it points to skb_mac_header() and for
2754 * tunnel skb it points to outer mac header. */ 2717 * tunnel skb it points to outer mac header.
2718 * Keeps track of level of encapsulation of network headers.
2719 */
2755struct skb_gso_cb { 2720struct skb_gso_cb {
2756 int mac_offset; 2721 int mac_offset;
2722 int encap_level;
2757}; 2723};
2758#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) 2724#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
2759 2725
@@ -2783,12 +2749,13 @@ static inline bool skb_is_gso(const struct sk_buff *skb)
2783 return skb_shinfo(skb)->gso_size; 2749 return skb_shinfo(skb)->gso_size;
2784} 2750}
2785 2751
2752/* Note: Should be called only if skb_is_gso(skb) is true */
2786static inline bool skb_is_gso_v6(const struct sk_buff *skb) 2753static inline bool skb_is_gso_v6(const struct sk_buff *skb)
2787{ 2754{
2788 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 2755 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2789} 2756}
2790 2757
2791extern void __skb_warn_lro_forwarding(const struct sk_buff *skb); 2758void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2792 2759
2793static inline bool skb_warn_if_lro(const struct sk_buff *skb) 2760static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2794{ 2761{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 74f105847d13..1e2f4fe12773 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,7 +53,14 @@
53 * } 53 * }
54 * rcu_read_unlock(); 54 * rcu_read_unlock();
55 * 55 *
56 * See also the comment on struct slab_rcu in mm/slab.c. 56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
57 */ 64 */
58#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 65#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
59#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 66#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
@@ -381,10 +388,55 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
381/** 388/**
382 * kmalloc - allocate memory 389 * kmalloc - allocate memory
383 * @size: how many bytes of memory are required. 390 * @size: how many bytes of memory are required.
384 * @flags: the type of memory to allocate (see kcalloc). 391 * @flags: the type of memory to allocate.
385 * 392 *
386 * kmalloc is the normal method of allocating memory 393 * kmalloc is the normal method of allocating memory
387 * for objects smaller than page size in the kernel. 394 * for objects smaller than page size in the kernel.
395 *
396 * The @flags argument may be one of:
397 *
398 * %GFP_USER - Allocate memory on behalf of user. May sleep.
399 *
400 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
401 *
402 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
403 * For example, use this inside interrupt handlers.
404 *
405 * %GFP_HIGHUSER - Allocate pages from high memory.
406 *
407 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
408 *
409 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
410 *
411 * %GFP_NOWAIT - Allocation will not sleep.
412 *
413 * %GFP_THISNODE - Allocate node-local memory only.
414 *
415 * %GFP_DMA - Allocation suitable for DMA.
416 * Should only be used for kmalloc() caches. Otherwise, use a
417 * slab created with SLAB_DMA.
418 *
419 * Also it is possible to set different flags by OR'ing
420 * in one or more of the following additional @flags:
421 *
422 * %__GFP_COLD - Request cache-cold pages instead of
423 * trying to return cache-warm pages.
424 *
425 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
426 *
427 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
428 * (think twice before using).
429 *
430 * %__GFP_NORETRY - If memory is not immediately available,
431 * then give up at once.
432 *
433 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
434 *
435 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
436 *
437 * There are other flags available as well, but these are not intended
438 * for general use, and so are not documented here. For a full list of
439 * potential flags, always refer to linux/gfp.h.
388 */ 440 */
389static __always_inline void *kmalloc(size_t size, gfp_t flags) 441static __always_inline void *kmalloc(size_t size, gfp_t flags)
390{ 442{
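A minimal sketch of the two most common flag choices described above; the sizes and the surrounding function are arbitrary:

#include <linux/slab.h>
#include <linux/errno.h>

static int alloc_examples(void)
{
	char *buf  = kmalloc(128, GFP_KERNEL);	/* process context, may sleep */
	char *ibuf = kmalloc(64, GFP_ATOMIC);	/* atomic context, never sleeps */

	if (!buf || !ibuf) {
		kfree(buf);			/* kfree(NULL) is a no-op */
		kfree(ibuf);
		return -ENOMEM;
	}

	/* ... use the buffers ... */

	kfree(ibuf);
	kfree(buf);
	return 0;
}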
@@ -495,61 +547,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
495void print_slabinfo_header(struct seq_file *m); 547void print_slabinfo_header(struct seq_file *m);
496 548
497/** 549/**
498 * kmalloc - allocate memory
499 * @size: how many bytes of memory are required.
500 * @flags: the type of memory to allocate.
501 *
502 * The @flags argument may be one of:
503 *
504 * %GFP_USER - Allocate memory on behalf of user. May sleep.
505 *
506 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
507 *
508 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
509 * For example, use this inside interrupt handlers.
510 *
511 * %GFP_HIGHUSER - Allocate pages from high memory.
512 *
513 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
514 *
515 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
516 *
517 * %GFP_NOWAIT - Allocation will not sleep.
518 *
519 * %GFP_THISNODE - Allocate node-local memory only.
520 *
521 * %GFP_DMA - Allocation suitable for DMA.
522 * Should only be used for kmalloc() caches. Otherwise, use a
523 * slab created with SLAB_DMA.
524 *
525 * Also it is possible to set different flags by OR'ing
526 * in one or more of the following additional @flags:
527 *
528 * %__GFP_COLD - Request cache-cold pages instead of
529 * trying to return cache-warm pages.
530 *
531 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
532 *
533 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
534 * (think twice before using).
535 *
536 * %__GFP_NORETRY - If memory is not immediately available,
537 * then give up at once.
538 *
539 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
540 *
541 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
542 *
543 * There are other flags available as well, but these are not intended
544 * for general use, and so are not documented here. For a full list of
545 * potential flags, always refer to linux/gfp.h.
546 *
547 * kmalloc is the normal method of allocating memory
548 * in the kernel.
549 */
550static __always_inline void *kmalloc(size_t size, gfp_t flags);
551
552/**
553 * kmalloc_array - allocate memory for an array. 550 * kmalloc_array - allocate memory for an array.
554 * @n: number of elements. 551 * @n: number of elements.
555 * @size: element size. 552 * @size: element size.
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e9346b4f1ef4..09bfffb08a56 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,8 +27,8 @@ struct kmem_cache {
27 27
28 size_t colour; /* cache colouring range */ 28 size_t colour; /* cache colouring range */
29 unsigned int colour_off; /* colour offset */ 29 unsigned int colour_off; /* colour offset */
30 struct kmem_cache *slabp_cache; 30 struct kmem_cache *freelist_cache;
31 unsigned int slab_size; 31 unsigned int freelist_size;
32 32
33 /* constructor func */ 33 /* constructor func */
34 void (*ctor)(void *obj); 34 void (*ctor)(void *obj);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cc0b67eada42..f56bfa9e4526 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,7 +11,7 @@
11enum stat_item { 11enum stat_item {
12 ALLOC_FASTPATH, /* Allocation from cpu slab */ 12 ALLOC_FASTPATH, /* Allocation from cpu slab */
13 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ 13 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
14 FREE_FASTPATH, /* Free to cpu slub */ 14 FREE_FASTPATH, /* Free to cpu slab */
15 FREE_SLOWPATH, /* Freeing not to cpu slab */ 15 FREE_SLOWPATH, /* Freeing not to cpu slab */
16 FREE_FROZEN, /* Freeing to frozen slab */ 16 FREE_FROZEN, /* Freeing to frozen slab */
17 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ 17 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 731f5237d5f4..5da22ee42e16 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -49,6 +49,9 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
49 smp_call_func_t func, void *info, bool wait, 49 smp_call_func_t func, void *info, bool wait,
50 gfp_t gfp_flags); 50 gfp_t gfp_flags);
51 51
52void __smp_call_function_single(int cpuid, struct call_single_data *data,
53 int wait);
54
52#ifdef CONFIG_SMP 55#ifdef CONFIG_SMP
53 56
54#include <linux/preempt.h> 57#include <linux/preempt.h>
@@ -95,9 +98,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait);
95void smp_call_function_many(const struct cpumask *mask, 98void smp_call_function_many(const struct cpumask *mask,
96 smp_call_func_t func, void *info, bool wait); 99 smp_call_func_t func, void *info, bool wait);
97 100
98void __smp_call_function_single(int cpuid, struct call_single_data *data,
99 int wait);
100
101int smp_call_function_any(const struct cpumask *mask, 101int smp_call_function_any(const struct cpumask *mask,
102 smp_call_func_t func, void *info, int wait); 102 smp_call_func_t func, void *info, int wait);
103 103
@@ -106,14 +106,10 @@ void kick_all_cpus_sync(void);
106/* 106/*
107 * Generic and arch helpers 107 * Generic and arch helpers
108 */ 108 */
109#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
110void __init call_function_init(void); 109void __init call_function_init(void);
111void generic_smp_call_function_single_interrupt(void); 110void generic_smp_call_function_single_interrupt(void);
112#define generic_smp_call_function_interrupt \ 111#define generic_smp_call_function_interrupt \
113 generic_smp_call_function_single_interrupt 112 generic_smp_call_function_single_interrupt
114#else
115static inline void call_function_init(void) { }
116#endif
117 113
118/* 114/*
119 * Mark the boot cpu "online" so that it can call console drivers in 115 * Mark the boot cpu "online" so that it can call console drivers in
@@ -155,12 +151,6 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
155 151
156static inline void kick_all_cpus_sync(void) { } 152static inline void kick_all_cpus_sync(void) { }
157 153
158static inline void __smp_call_function_single(int cpuid,
159 struct call_single_data *data, int wait)
160{
161 on_each_cpu(data->func, data->info, wait);
162}
163
164#endif /* !SMP */ 154#endif /* !SMP */
165 155
166/* 156/*
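With __smp_call_function_single() now declared outside the CONFIG_SMP block, callers can use a single code path on SMP and UP builds. A hedged sketch; the callback, the call_single_data instance and kick_cpu() are invented:

#include <linux/smp.h>
#include <linux/printk.h>

static void remote_tick(void *info)
{
	pr_info("remote_tick on cpu %d\n", smp_processor_id());
}

static struct call_single_data tick_csd = {
	.func = remote_tick,
};

/* Run remote_tick() on 'cpu' without waiting for it to complete. */
static void kick_cpu(int cpu)
{
	__smp_call_function_single(cpu, &tick_csd, 0);
}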
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index 900f0e328235..a25bd6f65e7f 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -26,6 +26,8 @@ struct rspi_plat_data {
26 unsigned int dma_rx_id; 26 unsigned int dma_rx_id;
27 27
28 unsigned dma_width_16bit:1; /* DMAC read/write width = 16-bit */ 28 unsigned dma_width_16bit:1; /* DMAC read/write width = 16-bit */
29
30 u16 num_chipselect;
29}; 31};
30 32
31#endif 33#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 887116dbce2c..8c62ba74dd91 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,7 @@
23#include <linux/mod_devicetable.h> 23#include <linux/mod_devicetable.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/kthread.h> 25#include <linux/kthread.h>
26#include <linux/completion.h>
26 27
27/* 28/*
28 * INTERFACES between SPI master-side drivers and SPI infrastructure. 29 * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -150,8 +151,7 @@ static inline void *spi_get_drvdata(struct spi_device *spi)
150} 151}
151 152
152struct spi_message; 153struct spi_message;
153 154struct spi_transfer;
154
155 155
156/** 156/**
157 * struct spi_driver - Host side "protocol" driver 157 * struct spi_driver - Host side "protocol" driver
@@ -257,6 +257,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
257 * @queue_lock: spinlock to synchronise access to message queue 257 * @queue_lock: spinlock to synchronise access to message queue
258 * @queue: message queue 258 * @queue: message queue
259 * @cur_msg: the currently in-flight message 259 * @cur_msg: the currently in-flight message
260 * @cur_msg_prepared: spi_prepare_message was called for the currently
261 * in-flight message
262 * @xfer_completion: used by core transfer_one_message()
260 * @busy: message pump is busy 263 * @busy: message pump is busy
261 * @running: message pump is running 264 * @running: message pump is running
262 * @rt: whether this queue is set to run as a realtime task 265 * @rt: whether this queue is set to run as a realtime task
@@ -274,6 +277,16 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
274 * @unprepare_transfer_hardware: there are currently no more messages on the 277 * @unprepare_transfer_hardware: there are currently no more messages on the
275 * queue so the subsystem notifies the driver that it may relax the 278 * queue so the subsystem notifies the driver that it may relax the
276 * hardware by issuing this call 279 * hardware by issuing this call
280 * @set_cs: assert or deassert chip select, true to assert. May be called
281 * from interrupt context.
282 * @prepare_message: set up the controller to transfer a single message,
283 * for example doing DMA mapping. Called from threaded
284 * context.
285 * @transfer_one: transfer a single spi_transfer. When the
286 * driver is finished with this transfer it must call
287 * spi_finalize_current_transfer() so the subsystem can issue
288 * the next transfer
289 * @unprepare_message: undo any work done by prepare_message().
277 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 290 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
278 * number. Any individual value may be -ENOENT for CS lines that 291 * number. Any individual value may be -ENOENT for CS lines that
279 * are not GPIOs (driven by the SPI controller itself). 292 * are not GPIOs (driven by the SPI controller itself).
@@ -388,11 +401,25 @@ struct spi_master {
388 bool running; 401 bool running;
389 bool rt; 402 bool rt;
390 bool auto_runtime_pm; 403 bool auto_runtime_pm;
404 bool cur_msg_prepared;
405 struct completion xfer_completion;
391 406
392 int (*prepare_transfer_hardware)(struct spi_master *master); 407 int (*prepare_transfer_hardware)(struct spi_master *master);
393 int (*transfer_one_message)(struct spi_master *master, 408 int (*transfer_one_message)(struct spi_master *master,
394 struct spi_message *mesg); 409 struct spi_message *mesg);
395 int (*unprepare_transfer_hardware)(struct spi_master *master); 410 int (*unprepare_transfer_hardware)(struct spi_master *master);
411 int (*prepare_message)(struct spi_master *master,
412 struct spi_message *message);
413 int (*unprepare_message)(struct spi_master *master,
414 struct spi_message *message);
415
416 /*
417 * These hooks are for drivers that use a generic implementation
418 * of transfer_one_message() provied by the core.
419 */
420 void (*set_cs)(struct spi_device *spi, bool enable);
421 int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
422 struct spi_transfer *transfer);
396 423
397 /* gpio chip select */ 424 /* gpio chip select */
398 int *cs_gpios; 425 int *cs_gpios;
@@ -428,12 +455,15 @@ extern int spi_master_resume(struct spi_master *master);
428/* Calls the driver make to interact with the message queue */ 455/* Calls the driver make to interact with the message queue */
429extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); 456extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
430extern void spi_finalize_current_message(struct spi_master *master); 457extern void spi_finalize_current_message(struct spi_master *master);
458extern void spi_finalize_current_transfer(struct spi_master *master);
431 459
432/* the spi driver core manages memory for the spi_master classdev */ 460/* the spi driver core manages memory for the spi_master classdev */
433extern struct spi_master * 461extern struct spi_master *
434spi_alloc_master(struct device *host, unsigned size); 462spi_alloc_master(struct device *host, unsigned size);
435 463
436extern int spi_register_master(struct spi_master *master); 464extern int spi_register_master(struct spi_master *master);
465extern int devm_spi_register_master(struct device *dev,
466 struct spi_master *master);
437extern void spi_unregister_master(struct spi_master *master); 467extern void spi_unregister_master(struct spi_master *master);
438 468
439extern struct spi_master *spi_busnum_to_master(u16 busnum); 469extern struct spi_master *spi_busnum_to_master(u16 busnum);
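The new set_cs/transfer_one hooks, together with devm_spi_register_master(), let a controller driver lean on the core's generic transfer_one_message(). A hedged sketch of the wiring; every mydrv_* name and the private struct are invented, and IRQ registration is omitted:

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct mydrv {
	struct spi_master *master;
};

static void mydrv_set_cs(struct spi_device *spi, bool enable)
{
	gpio_set_value(spi->cs_gpio, enable);	/* assumes a GPIO chip select */
}

static int mydrv_transfer_one(struct spi_master *master, struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	/* program the FIFO/DMA from xfer->tx_buf / xfer->rx_buf here */

	/*
	 * A positive return tells the core the transfer is still in flight;
	 * it then waits on master->xfer_completion until the IRQ handler
	 * calls spi_finalize_current_transfer().
	 */
	return 1;
}

static irqreturn_t mydrv_irq(int irq, void *dev_id)	/* registered elsewhere */
{
	struct mydrv *priv = dev_id;

	spi_finalize_current_transfer(priv->master);
	return IRQ_HANDLED;
}

static int mydrv_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, sizeof(struct mydrv));
	struct mydrv *priv;

	if (!master)
		return -ENOMEM;

	priv = spi_master_get_devdata(master);
	priv->master = master;

	master->set_cs = mydrv_set_cs;
	master->transfer_one = mydrv_transfer_one;

	return devm_spi_register_master(&pdev->dev, master);
}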
@@ -823,6 +853,33 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
823 return (status < 0) ? status : result; 853 return (status < 0) ? status : result;
824} 854}
825 855
856/**
857 * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read
858 * @spi: device with which data will be exchanged
859 * @cmd: command to be written before data is read back
860 * Context: can sleep
861 *
862 * This returns the (unsigned) sixteen bit number returned by the device in cpu
863 * endianness, or else a negative error code. Callable only from contexts that
864 * can sleep.
865 *
866 * This function is similar to spi_w8r16, with the exception that it will
867 * convert the read 16 bit data word from big-endian to native endianness.
868 *
869 */
870static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
871
872{
873 ssize_t status;
874 __be16 result;
875
876 status = spi_write_then_read(spi, &cmd, 1, &result, 2);
877 if (status < 0)
878 return status;
879
880 return be16_to_cpu(result);
881}
882
826/*---------------------------------------------------------------------------*/ 883/*---------------------------------------------------------------------------*/
827 884
828/* 885/*
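A brief usage sketch for spi_w8r16be() above; the register constant and the surrounding function are hypothetical:

#include <linux/spi/spi.h>

#define MYCHIP_REG_TEMP	0x05	/* hypothetical big-endian 16-bit register */

static int mychip_read_temp(struct spi_device *spi)
{
	ssize_t val = spi_w8r16be(spi, MYCHIP_REG_TEMP);

	if (val < 0)
		return val;	/* negative errno from the transfer */

	return val;		/* already converted to CPU endianness */
}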
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c114614ed172..9b058eecd403 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -237,4 +237,18 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
237 __srcu_read_unlock(sp, idx); 237 __srcu_read_unlock(sp, idx);
238} 238}
239 239
240/**
241 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
242 *
243 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
244 *
245 * Call this after srcu_read_unlock, to guarantee that all memory operations
246 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
247 * the preceding srcu_read_unlock.
248 */
249static inline void smp_mb__after_srcu_read_unlock(void)
250{
251 /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
252}
253
240#endif 254#endif
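A hedged sketch of the guarantee documented above; the srcu_struct, the flag and the reader function are invented:

#include <linux/srcu.h>
#include <linux/compiler.h>

DEFINE_STATIC_SRCU(my_srcu);
static int reader_done;

static void my_reader(void)
{
	int idx = srcu_read_lock(&my_srcu);

	/* ... SRCU read-side critical section ... */

	srcu_read_unlock(&my_srcu, idx);
	smp_mb__after_srcu_read_unlock();
	ACCESS_ONCE(reader_done) = 1;	/* ordered after the unlock above */
}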
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 86a12b0cb239..0688472500bb 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -108,6 +108,16 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
108 return 0; 108 return 0;
109} 109}
110 110
111/* Get the device phy address */
112static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
113{
114 struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
115 if (!dev)
116 return -ENODEV;
117
118 return dev->dev->bus->sprom.et0phyaddr;
119}
120
111extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, 121extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
112 struct pci_dev *pdev); 122 struct pci_dev *pdev);
113extern int ssb_gige_map_irq(struct ssb_device *sdev, 123extern int ssb_gige_map_irq(struct ssb_device *sdev,
@@ -174,6 +184,10 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
174{ 184{
175 return -ENODEV; 185 return -ENODEV;
176} 186}
187static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
188{
189 return -ENODEV;
190}
177 191
178#endif /* CONFIG_SSB_DRIVER_GIGE */ 192#endif /* CONFIG_SSB_DRIVER_GIGE */
179#endif /* LINUX_SSB_DRIVER_GIGE_H_ */ 193#endif /* LINUX_SSB_DRIVER_GIGE_H_ */
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 3b5e910d14ca..d2abbdb8c6aa 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -28,6 +28,7 @@ struct cpu_stop_work {
28}; 28};
29 29
30int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); 30int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
31int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
31void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, 32void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
32 struct cpu_stop_work *work_buf); 33 struct cpu_stop_work *work_buf);
33int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); 34int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6740801aa71a..8af2804bab16 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -49,6 +49,7 @@ struct rpc_clnt {
49 49
50 unsigned int cl_softrtry : 1,/* soft timeouts */ 50 unsigned int cl_softrtry : 1,/* soft timeouts */
51 cl_discrtry : 1,/* disconnect before retry */ 51 cl_discrtry : 1,/* disconnect before retry */
52 cl_noretranstimeo: 1,/* No retransmit timeouts */
52 cl_autobind : 1,/* use getport() */ 53 cl_autobind : 1,/* use getport() */
53 cl_chatty : 1;/* be verbose */ 54 cl_chatty : 1;/* be verbose */
54 55
@@ -126,6 +127,7 @@ struct rpc_create_args {
126#define RPC_CLNT_CREATE_QUIET (1UL << 6) 127#define RPC_CLNT_CREATE_QUIET (1UL << 6)
127#define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7) 128#define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7)
128#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8) 129#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8)
130#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
129 131
130struct rpc_clnt *rpc_create(struct rpc_create_args *args); 132struct rpc_clnt *rpc_create(struct rpc_create_args *args);
131struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 133struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -134,6 +136,10 @@ void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
134struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 136struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
135struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *, 137struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *,
136 rpc_authflavor_t); 138 rpc_authflavor_t);
139int rpc_switch_client_transport(struct rpc_clnt *,
140 struct xprt_create *,
141 const struct rpc_timeout *);
142
137void rpc_shutdown_client(struct rpc_clnt *); 143void rpc_shutdown_client(struct rpc_clnt *);
138void rpc_release_client(struct rpc_clnt *); 144void rpc_release_client(struct rpc_clnt *);
139void rpc_task_release_client(struct rpc_task *); 145void rpc_task_release_client(struct rpc_task *);
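
A hedged sketch of how the new RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT flag would be requested at client-creation time; the helper name is an assumption and 'args' is assumed to be otherwise fully populated (transport, address, program, auth flavour):

#include <linux/sunrpc/clnt.h>

static struct rpc_clnt *example_create_client(struct rpc_create_args *args)
{
	/* Wait indefinitely for replies instead of timing out
	 * retransmissions; this ends up setting cl_noretranstimeo
	 * on the resulting client. */
	args->flags |= RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT;

	return rpc_create(args);
}
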
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 096ee58be11a..3a847de83fab 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -122,6 +122,7 @@ struct rpc_task_setup {
122#define RPC_TASK_SENT 0x0800 /* message was sent */ 122#define RPC_TASK_SENT 0x0800 /* message was sent */
123#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ 123#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
124#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ 124#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
125#define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */
125 126
126#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) 127#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
127#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) 128#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index cec7b9b5e1bf..8097b9df6773 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -288,7 +288,7 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
288int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); 288int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
289void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); 289void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
290void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); 290void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
291int xprt_prepare_transmit(struct rpc_task *task); 291bool xprt_prepare_transmit(struct rpc_task *task);
292void xprt_transmit(struct rpc_task *task); 292void xprt_transmit(struct rpc_task *task);
293void xprt_end_transmit(struct rpc_task *task); 293void xprt_end_transmit(struct rpc_task *task);
294int xprt_adjust_timeout(struct rpc_rqst *req); 294int xprt_adjust_timeout(struct rpc_rqst *req);
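
With xprt_prepare_transmit() now returning bool, a caller checks "may I transmit?" rather than an errno. A rough sketch of that call shape (illustrative; it only mirrors the general prepare/transmit/end sequence, not the exact client state machine):

#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprt.h>

static void example_transmit(struct rpc_task *task)
{
	if (!xprt_prepare_transmit(task))
		return;		/* transport busy or request already handled */

	xprt_transmit(task);
	xprt_end_transmit(task);
}
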
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8d4fa82bfb91..c0f75261a728 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -139,7 +139,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
139 139
140extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 140extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
141 unsigned long address); 141 unsigned long address);
142extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); 142extern void migration_entry_wait_huge(struct vm_area_struct *vma,
143 struct mm_struct *mm, pte_t *pte);
143#else 144#else
144 145
145#define make_migration_entry(page, write) swp_entry(0, 0) 146#define make_migration_entry(page, write) swp_entry(0, 0)
@@ -151,8 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
151static inline void make_migration_entry_read(swp_entry_t *entryp) { } 152static inline void make_migration_entry_read(swp_entry_t *entryp) { }
152static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 153static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
153 unsigned long address) { } 154 unsigned long address) { }
154static inline void migration_entry_wait_huge(struct mm_struct *mm, 155static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
155 pte_t *pte) { } 156 struct mm_struct *mm, pte_t *pte) { }
156static inline int is_write_migration_entry(swp_entry_t entry) 157static inline int is_write_migration_entry(swp_entry_t entry)
157{ 158{
158 return 0; 159 return 0;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7fac04e7ff6e..94273bbe6050 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
120 .class = &event_class_syscall_enter, \ 120 .class = &event_class_syscall_enter, \
121 .event.funcs = &enter_syscall_print_funcs, \ 121 .event.funcs = &enter_syscall_print_funcs, \
122 .data = (void *)&__syscall_meta_##sname,\ 122 .data = (void *)&__syscall_meta_##sname,\
123 .flags = TRACE_EVENT_FL_CAP_ANY, \ 123 .flags = TRACE_EVENT_FL_CAP_ANY, \
124 }; \ 124 }; \
125 static struct ftrace_event_call __used \ 125 static struct ftrace_event_call __used \
126 __attribute__((section("_ftrace_events"))) \ 126 __attribute__((section("_ftrace_events"))) \
@@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
134 .class = &event_class_syscall_exit, \ 134 .class = &event_class_syscall_exit, \
135 .event.funcs = &exit_syscall_print_funcs, \ 135 .event.funcs = &exit_syscall_print_funcs, \
136 .data = (void *)&__syscall_meta_##sname,\ 136 .data = (void *)&__syscall_meta_##sname,\
137 .flags = TRACE_EVENT_FL_CAP_ANY, \ 137 .flags = TRACE_EVENT_FL_CAP_ANY, \
138 }; \ 138 }; \
139 static struct ftrace_event_call __used \ 139 static struct ftrace_event_call __used \
140 __attribute__((section("_ftrace_events"))) \ 140 __attribute__((section("_ftrace_events"))) \
@@ -184,7 +184,8 @@ extern struct trace_event_functions exit_syscall_print_funcs;
184 184
185#define __PROTECT(...) asmlinkage_protect(__VA_ARGS__) 185#define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
186#define __SYSCALL_DEFINEx(x, name, ...) \ 186#define __SYSCALL_DEFINEx(x, name, ...) \
187 asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ 187 asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
188 __attribute__((alias(__stringify(SyS##name)))); \
188 static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ 189 static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
189 asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ 190 asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
190 asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ 191 asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
@@ -194,7 +195,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
194 __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ 195 __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
195 return ret; \ 196 return ret; \
196 } \ 197 } \
197 SYSCALL_ALIAS(sys##name, SyS##name); \
198 static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) 198 static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
199 199
200asmlinkage long sys_time(time_t __user *tloc); 200asmlinkage long sys_time(time_t __user *tloc);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 11baec7c9b26..6695040a0317 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -173,7 +173,6 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
173struct sysfs_ops { 173struct sysfs_ops {
174 ssize_t (*show)(struct kobject *, struct attribute *, char *); 174 ssize_t (*show)(struct kobject *, struct attribute *, char *);
175 ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); 175 ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
176 const void *(*namespace)(struct kobject *, const struct attribute *);
177}; 176};
178 177
179struct sysfs_dirent; 178struct sysfs_dirent;
@@ -183,19 +182,23 @@ struct sysfs_dirent;
183int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), 182int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
184 void *data, struct module *owner); 183 void *data, struct module *owner);
185 184
186int __must_check sysfs_create_dir(struct kobject *kobj); 185int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
187void sysfs_remove_dir(struct kobject *kobj); 186void sysfs_remove_dir(struct kobject *kobj);
188int __must_check sysfs_rename_dir(struct kobject *kobj, const char *new_name); 187int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
189int __must_check sysfs_move_dir(struct kobject *kobj, 188 const void *new_ns);
190 struct kobject *new_parent_kobj); 189int __must_check sysfs_move_dir_ns(struct kobject *kobj,
191 190 struct kobject *new_parent_kobj,
192int __must_check sysfs_create_file(struct kobject *kobj, 191 const void *new_ns);
193 const struct attribute *attr); 192
193int __must_check sysfs_create_file_ns(struct kobject *kobj,
194 const struct attribute *attr,
195 const void *ns);
194int __must_check sysfs_create_files(struct kobject *kobj, 196int __must_check sysfs_create_files(struct kobject *kobj,
195 const struct attribute **attr); 197 const struct attribute **attr);
196int __must_check sysfs_chmod_file(struct kobject *kobj, 198int __must_check sysfs_chmod_file(struct kobject *kobj,
197 const struct attribute *attr, umode_t mode); 199 const struct attribute *attr, umode_t mode);
198void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); 200void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
201 const void *ns);
199void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); 202void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
200 203
201int __must_check sysfs_create_bin_file(struct kobject *kobj, 204int __must_check sysfs_create_bin_file(struct kobject *kobj,
@@ -210,8 +213,9 @@ int __must_check sysfs_create_link_nowarn(struct kobject *kobj,
210 const char *name); 213 const char *name);
211void sysfs_remove_link(struct kobject *kobj, const char *name); 214void sysfs_remove_link(struct kobject *kobj, const char *name);
212 215
213int sysfs_rename_link(struct kobject *kobj, struct kobject *target, 216int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target,
214 const char *old_name, const char *new_name); 217 const char *old_name, const char *new_name,
218 const void *new_ns);
215 219
216void sysfs_delete_link(struct kobject *dir, struct kobject *targ, 220void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
217 const char *name); 221 const char *name);
@@ -241,9 +245,9 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
241 245
242void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); 246void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
243void sysfs_notify_dirent(struct sysfs_dirent *sd); 247void sysfs_notify_dirent(struct sysfs_dirent *sd);
244struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, 248struct sysfs_dirent *sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd,
245 const void *ns, 249 const unsigned char *name,
246 const unsigned char *name); 250 const void *ns);
247struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); 251struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
248void sysfs_put(struct sysfs_dirent *sd); 252void sysfs_put(struct sysfs_dirent *sd);
249 253
@@ -257,7 +261,7 @@ static inline int sysfs_schedule_callback(struct kobject *kobj,
257 return -ENOSYS; 261 return -ENOSYS;
258} 262}
259 263
260static inline int sysfs_create_dir(struct kobject *kobj) 264static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
261{ 265{
262 return 0; 266 return 0;
263} 267}
@@ -266,19 +270,22 @@ static inline void sysfs_remove_dir(struct kobject *kobj)
266{ 270{
267} 271}
268 272
269static inline int sysfs_rename_dir(struct kobject *kobj, const char *new_name) 273static inline int sysfs_rename_dir_ns(struct kobject *kobj,
274 const char *new_name, const void *new_ns)
270{ 275{
271 return 0; 276 return 0;
272} 277}
273 278
274static inline int sysfs_move_dir(struct kobject *kobj, 279static inline int sysfs_move_dir_ns(struct kobject *kobj,
275 struct kobject *new_parent_kobj) 280 struct kobject *new_parent_kobj,
281 const void *new_ns)
276{ 282{
277 return 0; 283 return 0;
278} 284}
279 285
280static inline int sysfs_create_file(struct kobject *kobj, 286static inline int sysfs_create_file_ns(struct kobject *kobj,
281 const struct attribute *attr) 287 const struct attribute *attr,
288 const void *ns)
282{ 289{
283 return 0; 290 return 0;
284} 291}
@@ -295,8 +302,9 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
295 return 0; 302 return 0;
296} 303}
297 304
298static inline void sysfs_remove_file(struct kobject *kobj, 305static inline void sysfs_remove_file_ns(struct kobject *kobj,
299 const struct attribute *attr) 306 const struct attribute *attr,
307 const void *ns)
300{ 308{
301} 309}
302 310
@@ -333,8 +341,9 @@ static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
333{ 341{
334} 342}
335 343
336static inline int sysfs_rename_link(struct kobject *k, struct kobject *t, 344static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
337 const char *old_name, const char *new_name) 345 const char *old_name,
346 const char *new_name, const void *ns)
338{ 347{
339 return 0; 348 return 0;
340} 349}
@@ -413,10 +422,9 @@ static inline void sysfs_notify(struct kobject *kobj, const char *dir,
413static inline void sysfs_notify_dirent(struct sysfs_dirent *sd) 422static inline void sysfs_notify_dirent(struct sysfs_dirent *sd)
414{ 423{
415} 424}
416static inline 425static inline struct sysfs_dirent *
417struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, 426sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd, const unsigned char *name,
418 const void *ns, 427 const void *ns)
419 const unsigned char *name)
420{ 428{
421 return NULL; 429 return NULL;
422} 430}
@@ -435,4 +443,28 @@ static inline int __must_check sysfs_init(void)
435 443
436#endif /* CONFIG_SYSFS */ 444#endif /* CONFIG_SYSFS */
437 445
446static inline int __must_check sysfs_create_file(struct kobject *kobj,
447 const struct attribute *attr)
448{
449 return sysfs_create_file_ns(kobj, attr, NULL);
450}
451
452static inline void sysfs_remove_file(struct kobject *kobj,
453 const struct attribute *attr)
454{
455 return sysfs_remove_file_ns(kobj, attr, NULL);
456}
457
458static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
459 const char *old_name, const char *new_name)
460{
461 return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL);
462}
463
464static inline struct sysfs_dirent *
465sysfs_get_dirent(struct sysfs_dirent *parent_sd, const unsigned char *name)
466{
467 return sysfs_get_dirent_ns(parent_sd, name, NULL);
468}
469
438#endif /* _SYSFS_H_ */ 470#endif /* _SYSFS_H_ */
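
A hedged sketch of the namespace-tagged sysfs API introduced above; the helper and 'ns_tag' are illustrative. The untagged legacy calls are now just wrappers that pass a NULL namespace:

#include <linux/sysfs.h>

/* Illustrative helper: expose one attribute, optionally tagged with a
 * namespace so it is only visible to members of that namespace. */
static int example_expose(struct kobject *kobj, const struct attribute *attr,
			  const void *ns_tag)
{
	if (ns_tag)
		return sysfs_create_file_ns(kobj, attr, ns_tag);

	/* Untagged case: equivalent to sysfs_create_file_ns(kobj, attr, NULL). */
	return sysfs_create_file(kobj, attr);
}
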
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933cced7..387fa7d05c98 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -17,9 +17,6 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/types.h> 18#include <linux/types.h>
19 19
20/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
21#define SYSRQ_DEFAULT_ENABLE 1
22
23/* Possible values of bitmask for enabling sysrq functions */ 20/* Possible values of bitmask for enabling sysrq functions */
24/* 0x0001 is reserved for enable everything */ 21/* 0x0001 is reserved for enable everything */
25#define SYSRQ_ENABLE_LOG 0x0002 22#define SYSRQ_ENABLE_LOG 0x0002
diff --git a/include/linux/tegra-powergate.h b/include/linux/tegra-powergate.h
index 55c29a8d5015..fd4498329c7c 100644
--- a/include/linux/tegra-powergate.h
+++ b/include/linux/tegra-powergate.h
@@ -34,10 +34,18 @@ struct clk;
34#define TEGRA_POWERGATE_CPU3 11 34#define TEGRA_POWERGATE_CPU3 11
35#define TEGRA_POWERGATE_CELP 12 35#define TEGRA_POWERGATE_CELP 12
36#define TEGRA_POWERGATE_3D1 13 36#define TEGRA_POWERGATE_3D1 13
37#define TEGRA_POWERGATE_CPU0 14
38#define TEGRA_POWERGATE_C0NC 15
39#define TEGRA_POWERGATE_C1NC 16
40#define TEGRA_POWERGATE_DIS 18
41#define TEGRA_POWERGATE_DISB 19
42#define TEGRA_POWERGATE_XUSBA 20
43#define TEGRA_POWERGATE_XUSBB 21
44#define TEGRA_POWERGATE_XUSBC 22
37 45
38#define TEGRA_POWERGATE_CPU0 TEGRA_POWERGATE_CPU
39#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D 46#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
40 47
48#ifdef CONFIG_ARCH_TEGRA
41int tegra_powergate_is_powered(int id); 49int tegra_powergate_is_powered(int id);
42int tegra_powergate_power_on(int id); 50int tegra_powergate_power_on(int id);
43int tegra_powergate_power_off(int id); 51int tegra_powergate_power_off(int id);
@@ -45,5 +53,31 @@ int tegra_powergate_remove_clamping(int id);
45 53
46/* Must be called with clk disabled, and returns with clk enabled */ 54/* Must be called with clk disabled, and returns with clk enabled */
47int tegra_powergate_sequence_power_up(int id, struct clk *clk); 55int tegra_powergate_sequence_power_up(int id, struct clk *clk);
56#else
57static inline int tegra_powergate_is_powered(int id)
58{
59 return -ENOSYS;
60}
61
62static inline int tegra_powergate_power_on(int id)
63{
64 return -ENOSYS;
65}
66
67static inline int tegra_powergate_power_off(int id)
68{
69 return -ENOSYS;
70}
71
72static inline int tegra_powergate_remove_clamping(int id)
73{
74 return -ENOSYS;
75}
76
77static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk)
78{
79 return -ENOSYS;
80}
81#endif
48 82
49#endif /* _MACH_TEGRA_POWERGATE_H_ */ 83#endif /* _MACH_TEGRA_POWERGATE_H_ */
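
A minimal sketch of a caller of the newly stubbed powergate API (illustrative; the partition choice and wrapper name are assumptions). The clock must be disabled on entry and is left enabled on success, per the comment above tegra_powergate_sequence_power_up():

#include <linux/clk.h>
#include <linux/tegra-powergate.h>

static int example_power_up_xusba(struct clk *clk)
{
	/* With CONFIG_ARCH_TEGRA disabled this now still compiles and
	 * simply returns -ENOSYS from the new stubs. */
	return tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA, clk);
}
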
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
new file mode 100644
index 000000000000..361de59a2285
--- /dev/null
+++ b/include/linux/thinkpad_acpi.h
@@ -0,0 +1,15 @@
1#ifndef __THINKPAD_ACPI_H__
2#define __THINKPAD_ACPI_H__
3
 4/* This function returns 0 on success, or a negative error code
 5 (e.g. -ENODEV if no LED is present) */
6
7enum {
8 TPACPI_LED_MUTE,
9 TPACPI_LED_MICMUTE,
10 TPACPI_LED_MAX,
11};
12
13int tpacpi_led_set(int whichled, bool on);
14
15#endif
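
A short usage sketch of the new tpacpi_led_set() interface (illustrative; the caller below is an assumption, e.g. an audio driver syncing its mute state):

#include <linux/types.h>
#include <linux/thinkpad_acpi.h>

static int example_sync_mute_led(bool muted)
{
	/* 0 on success, or a negative error code such as -ENODEV when
	 * the machine has no mute LED. */
	return tpacpi_led_set(TPACPI_LED_MUTE, muted);
}
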
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e7e04736802f..fddbe2023a5d 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -104,8 +104,21 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
104#define test_thread_flag(flag) \ 104#define test_thread_flag(flag) \
105 test_ti_thread_flag(current_thread_info(), flag) 105 test_ti_thread_flag(current_thread_info(), flag)
106 106
107#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED) 107static inline __deprecated void set_need_resched(void)
108#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED) 108{
109 /*
 110 * Use of this function is deprecated.
111 *
 112 * As of this writing there are only a few users in the DRM tree left,
113 * all of which are wrong and can be removed without causing too much
114 * grief.
115 *
116 * The DRM people are aware and are working on removing the last few
117 * instances.
118 */
119}
120
121#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
109 122
110#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK 123#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
111/* 124/*
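
A hedged sketch of the intended replacement pattern: poll the flag with the new tif_need_resched() helper instead of forcing it via set_need_resched(), which is now a deprecated no-op (loop body and function name are illustrative):

#include <linux/thread_info.h>

static void example_poll(void)
{
	do {
		/* ... one bounded unit of work ... */
	} while (!tif_need_resched());
	/* TIF_NEED_RESCHED is set: return and let the caller reschedule. */
}
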
diff --git a/include/linux/topology.h b/include/linux/topology.h
index d3cf0d6e7712..12ae6ce997d6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -106,6 +106,8 @@ int arch_update_cpu_topology(void);
106 .last_balance = jiffies, \ 106 .last_balance = jiffies, \
107 .balance_interval = 1, \ 107 .balance_interval = 1, \
108 .smt_gain = 1178, /* 15% */ \ 108 .smt_gain = 1178, /* 15% */ \
109 .max_newidle_lb_cost = 0, \
110 .next_decay_max_lb_cost = jiffies, \
109} 111}
110#endif 112#endif
111#endif /* CONFIG_SCHED_SMT */ 113#endif /* CONFIG_SCHED_SMT */
@@ -135,6 +137,8 @@ int arch_update_cpu_topology(void);
135 , \ 137 , \
136 .last_balance = jiffies, \ 138 .last_balance = jiffies, \
137 .balance_interval = 1, \ 139 .balance_interval = 1, \
140 .max_newidle_lb_cost = 0, \
141 .next_decay_max_lb_cost = jiffies, \
138} 142}
139#endif 143#endif
140#endif /* CONFIG_SCHED_MC */ 144#endif /* CONFIG_SCHED_MC */
@@ -166,6 +170,8 @@ int arch_update_cpu_topology(void);
166 , \ 170 , \
167 .last_balance = jiffies, \ 171 .last_balance = jiffies, \
168 .balance_interval = 1, \ 172 .balance_interval = 1, \
173 .max_newidle_lb_cost = 0, \
174 .next_decay_max_lb_cost = jiffies, \
169} 175}
170#endif 176#endif
171 177
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index ebeab360d851..f16dc0a40049 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void)
267 267
268#define TRACE_EVENT_FLAGS(event, flag) 268#define TRACE_EVENT_FLAGS(event, flag)
269 269
270#define TRACE_EVENT_PERF_PERM(event, expr...)
271
270#endif /* DECLARE_TRACE */ 272#endif /* DECLARE_TRACE */
271 273
272#ifndef TRACE_EVENT 274#ifndef TRACE_EVENT
@@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void)
399 401
400#define TRACE_EVENT_FLAGS(event, flag) 402#define TRACE_EVENT_FLAGS(event, flag)
401 403
404#define TRACE_EVENT_PERF_PERM(event, expr...)
405
402#endif /* ifdef TRACE_EVENT (see note above) */ 406#endif /* ifdef TRACE_EVENT (see note above) */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 64f864651d86..97d660ed70c1 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -180,7 +180,6 @@ struct tty_port_operations {
180 IFF the port was initialized. Do not use to free resources. Called 180 IFF the port was initialized. Do not use to free resources. Called
181 under the port mutex to serialize against activate/shutdowns */ 181 under the port mutex to serialize against activate/shutdowns */
182 void (*shutdown)(struct tty_port *port); 182 void (*shutdown)(struct tty_port *port);
183 void (*drop)(struct tty_port *port);
184 /* Called under the port mutex from tty_port_open, serialized using 183 /* Called under the port mutex from tty_port_open, serialized using
185 the port mutex */ 184 the port mutex */
186 /* FIXME: long term getting the tty argument *out* of this would be 185 /* FIXME: long term getting the tty argument *out* of this would be
@@ -672,31 +671,17 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
672#define wait_event_interruptible_tty(tty, wq, condition) \ 671#define wait_event_interruptible_tty(tty, wq, condition) \
673({ \ 672({ \
674 int __ret = 0; \ 673 int __ret = 0; \
675 if (!(condition)) { \ 674 if (!(condition)) \
676 __wait_event_interruptible_tty(tty, wq, condition, __ret); \ 675 __ret = __wait_event_interruptible_tty(tty, wq, \
677 } \ 676 condition); \
678 __ret; \ 677 __ret; \
679}) 678})
680 679
681#define __wait_event_interruptible_tty(tty, wq, condition, ret) \ 680#define __wait_event_interruptible_tty(tty, wq, condition) \
682do { \ 681 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
683 DEFINE_WAIT(__wait); \ 682 tty_unlock(tty); \
684 \
685 for (;;) { \
686 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
687 if (condition) \
688 break; \
689 if (!signal_pending(current)) { \
690 tty_unlock(tty); \
691 schedule(); \ 683 schedule(); \
692 tty_lock(tty); \ 684 tty_lock(tty))
693 continue; \
694 } \
695 ret = -ERESTARTSYS; \
696 break; \
697 } \
698 finish_wait(&wq, &__wait); \
699} while (0)
700 685
701#ifdef CONFIG_PROC_FS 686#ifdef CONFIG_PROC_FS
702extern void proc_tty_register_driver(struct tty_driver *); 687extern void proc_tty_register_driver(struct tty_driver *);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 8da8c4e87da3..7bfabd20204c 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -67,6 +67,13 @@ struct u64_stats_sync {
67#endif 67#endif
68}; 68};
69 69
70
71#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
72# define u64_stats_init(syncp) seqcount_init(syncp.seq)
73#else
74# define u64_stats_init(syncp) do { } while (0)
75#endif
76
70static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) 77static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
71{ 78{
72#if BITS_PER_LONG==32 && defined(CONFIG_SMP) 79#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
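
A small sketch of the new u64_stats_init() in context (illustrative; the stats structure and helpers below are assumptions). The call is a no-op on 64-bit and seeds the embedded seqcount on 32-bit SMP, so it must run before the first update:

#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_stats_setup(struct example_stats *s)
{
	u64_stats_init(&s->syncp);	/* required before first use on 32-bit SMP */
}

static void example_stats_account(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}
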
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951e1855..9d8cf056e661 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -15,7 +15,7 @@
15 */ 15 */
16static inline void pagefault_disable(void) 16static inline void pagefault_disable(void)
17{ 17{
18 inc_preempt_count(); 18 preempt_count_inc();
19 /* 19 /*
20 * make sure to have issued the store before a pagefault 20 * make sure to have issued the store before a pagefault
21 * can hit. 21 * can hit.
@@ -30,11 +30,7 @@ static inline void pagefault_enable(void)
30 * the pagefault handler again. 30 * the pagefault handler again.
31 */ 31 */
32 barrier(); 32 barrier();
33 dec_preempt_count(); 33 preempt_count_dec();
34 /*
35 * make sure we do..
36 */
37 barrier();
38 preempt_check_resched(); 34 preempt_check_resched();
39} 35}
40 36
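
A hedged sketch of the classic pattern this pair protects (illustrative; the wrapper name is an assumption). The disable/enable calls now use preempt_count_inc()/preempt_count_dec() underneath, but callers are unchanged:

#include <linux/uaccess.h>

static unsigned long example_probe_user(void *dst, const void __user *src,
					size_t len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return left;	/* bytes NOT copied; 0 means success */
}
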
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 06f28beed7c2..319eae70fe84 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -30,6 +30,7 @@
30struct vm_area_struct; 30struct vm_area_struct;
31struct mm_struct; 31struct mm_struct;
32struct inode; 32struct inode;
33struct notifier_block;
33 34
34#ifdef CONFIG_ARCH_SUPPORTS_UPROBES 35#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
35# include <asm/uprobes.h> 36# include <asm/uprobes.h>
@@ -108,6 +109,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
108extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 109extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
109extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); 110extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
110extern bool __weak is_trap_insn(uprobe_opcode_t *insn); 111extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
112extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
111extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 113extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
112extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); 114extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
113extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 115extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
@@ -117,14 +119,21 @@ extern void uprobe_start_dup_mmap(void);
117extern void uprobe_end_dup_mmap(void); 119extern void uprobe_end_dup_mmap(void);
118extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); 120extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
119extern void uprobe_free_utask(struct task_struct *t); 121extern void uprobe_free_utask(struct task_struct *t);
120extern void uprobe_copy_process(struct task_struct *t); 122extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
121extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); 123extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
122extern int uprobe_post_sstep_notifier(struct pt_regs *regs); 124extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
123extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); 125extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
124extern void uprobe_notify_resume(struct pt_regs *regs); 126extern void uprobe_notify_resume(struct pt_regs *regs);
125extern bool uprobe_deny_signal(void); 127extern bool uprobe_deny_signal(void);
126extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs); 128extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
127extern void uprobe_clear_state(struct mm_struct *mm); 129extern void uprobe_clear_state(struct mm_struct *mm);
130extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
131extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
132extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
133extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
134extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
135extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
136extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
128#else /* !CONFIG_UPROBES */ 137#else /* !CONFIG_UPROBES */
129struct uprobes_state { 138struct uprobes_state {
130}; 139};
@@ -174,7 +183,7 @@ static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
174static inline void uprobe_free_utask(struct task_struct *t) 183static inline void uprobe_free_utask(struct task_struct *t)
175{ 184{
176} 185}
177static inline void uprobe_copy_process(struct task_struct *t) 186static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
178{ 187{
179} 188}
180static inline void uprobe_clear_state(struct mm_struct *mm) 189static inline void uprobe_clear_state(struct mm_struct *mm)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 001629cd1a97..512ab162832c 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -475,7 +475,8 @@ struct usb3_lpm_parameters {
475 * @lpm_capable: device supports LPM 475 * @lpm_capable: device supports LPM
476 * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM 476 * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
477 * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM 477 * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
478 * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled 478 * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
479 * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
479 * @usb3_lpm_enabled: USB3 hardware LPM enabled 480 * @usb3_lpm_enabled: USB3 hardware LPM enabled
480 * @string_langid: language ID for strings 481 * @string_langid: language ID for strings
481 * @product: iProduct string, if present (static) 482 * @product: iProduct string, if present (static)
@@ -548,6 +549,7 @@ struct usb_device {
548 unsigned usb2_hw_lpm_capable:1; 549 unsigned usb2_hw_lpm_capable:1;
549 unsigned usb2_hw_lpm_besl_capable:1; 550 unsigned usb2_hw_lpm_besl_capable:1;
550 unsigned usb2_hw_lpm_enabled:1; 551 unsigned usb2_hw_lpm_enabled:1;
552 unsigned usb2_hw_lpm_allowed:1;
551 unsigned usb3_lpm_enabled:1; 553 unsigned usb3_lpm_enabled:1;
552 int string_langid; 554 int string_langid;
553 555
@@ -702,7 +704,7 @@ extern int usb_alloc_streams(struct usb_interface *interface,
702 unsigned int num_streams, gfp_t mem_flags); 704 unsigned int num_streams, gfp_t mem_flags);
703 705
704/* Reverts a group of bulk endpoints back to not using stream IDs. */ 706/* Reverts a group of bulk endpoints back to not using stream IDs. */
705extern void usb_free_streams(struct usb_interface *interface, 707extern int usb_free_streams(struct usb_interface *interface,
706 struct usb_host_endpoint **eps, unsigned int num_eps, 708 struct usb_host_endpoint **eps, unsigned int num_eps,
707 gfp_t mem_flags); 709 gfp_t mem_flags);
708 710
@@ -1209,11 +1211,13 @@ struct usb_anchor {
1209 struct list_head urb_list; 1211 struct list_head urb_list;
1210 wait_queue_head_t wait; 1212 wait_queue_head_t wait;
1211 spinlock_t lock; 1213 spinlock_t lock;
1214 atomic_t suspend_wakeups;
1212 unsigned int poisoned:1; 1215 unsigned int poisoned:1;
1213}; 1216};
1214 1217
1215static inline void init_usb_anchor(struct usb_anchor *anchor) 1218static inline void init_usb_anchor(struct usb_anchor *anchor)
1216{ 1219{
1220 memset(anchor, 0, sizeof(*anchor));
1217 INIT_LIST_HEAD(&anchor->urb_list); 1221 INIT_LIST_HEAD(&anchor->urb_list);
1218 init_waitqueue_head(&anchor->wait); 1222 init_waitqueue_head(&anchor->wait);
1219 spin_lock_init(&anchor->lock); 1223 spin_lock_init(&anchor->lock);
@@ -1260,6 +1264,8 @@ typedef void (*usb_complete_t)(struct urb *);
1260 * @sg: scatter gather buffer list, the buffer size of each element in 1264 * @sg: scatter gather buffer list, the buffer size of each element in
1261 * the list (except the last) must be divisible by the endpoint's 1265 * the list (except the last) must be divisible by the endpoint's
1262 * max packet size if no_sg_constraint isn't set in 'struct usb_bus' 1266 * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
1267 * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
1268 * Do not use urb->sg for interrupt endpoints for now, only bulk.)
1263 * @num_mapped_sgs: (internal) number of mapped sg entries 1269 * @num_mapped_sgs: (internal) number of mapped sg entries
1264 * @num_sgs: number of entries in the sg list 1270 * @num_sgs: number of entries in the sg list
1265 * @transfer_buffer_length: How big is transfer_buffer. The transfer may 1271 * @transfer_buffer_length: How big is transfer_buffer. The transfer may
@@ -1574,6 +1580,8 @@ extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
1574extern void usb_poison_anchored_urbs(struct usb_anchor *anchor); 1580extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
1575extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor); 1581extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
1576extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor); 1582extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
1583extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor);
1584extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor);
1577extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor); 1585extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
1578extern void usb_unanchor_urb(struct urb *urb); 1586extern void usb_unanchor_urb(struct urb *urb);
1579extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor, 1587extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index cc25b70af33c..c3fa80745996 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -36,6 +36,9 @@
36 * SUCH DAMAGE. 36 * SUCH DAMAGE.
37 */ 37 */
38 38
39#ifndef __LINUX_USB_CDC_NCM_H
40#define __LINUX_USB_CDC_NCM_H
41
39#define CDC_NCM_COMM_ALTSETTING_NCM 0 42#define CDC_NCM_COMM_ALTSETTING_NCM 0
40#define CDC_NCM_COMM_ALTSETTING_MBIM 1 43#define CDC_NCM_COMM_ALTSETTING_MBIM 1
41 44
@@ -85,22 +88,13 @@
85#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) 88#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
86 89
87struct cdc_ncm_ctx { 90struct cdc_ncm_ctx {
88 struct usb_cdc_ncm_ntb_parameters ncm_parm;
89 struct hrtimer tx_timer; 91 struct hrtimer tx_timer;
90 struct tasklet_struct bh; 92 struct tasklet_struct bh;
91 93
92 const struct usb_cdc_ncm_desc *func_desc; 94 const struct usb_cdc_ncm_desc *func_desc;
93 const struct usb_cdc_mbim_desc *mbim_desc; 95 const struct usb_cdc_mbim_desc *mbim_desc;
94 const struct usb_cdc_header_desc *header_desc;
95 const struct usb_cdc_union_desc *union_desc;
96 const struct usb_cdc_ether_desc *ether_desc; 96 const struct usb_cdc_ether_desc *ether_desc;
97 97
98 struct net_device *netdev;
99 struct usb_device *udev;
100 struct usb_host_endpoint *in_ep;
101 struct usb_host_endpoint *out_ep;
102 struct usb_host_endpoint *status_ep;
103 struct usb_interface *intf;
104 struct usb_interface *control; 98 struct usb_interface *control;
105 struct usb_interface *data; 99 struct usb_interface *data;
106 100
@@ -113,8 +107,6 @@ struct cdc_ncm_ctx {
113 107
114 u32 tx_timer_pending; 108 u32 tx_timer_pending;
115 u32 tx_curr_frame_num; 109 u32 tx_curr_frame_num;
116 u32 rx_speed;
117 u32 tx_speed;
118 u32 rx_max; 110 u32 rx_max;
119 u32 tx_max; 111 u32 tx_max;
120 u32 max_datagram_size; 112 u32 max_datagram_size;
@@ -127,9 +119,14 @@ struct cdc_ncm_ctx {
127 u16 connected; 119 u16 connected;
128}; 120};
129 121
130extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); 122u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
131extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); 123int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
132extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 124void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
133extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); 125struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
134extern int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); 126int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
135extern int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset); 127int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
128struct sk_buff *
129cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
130int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
131
132#endif /* __LINUX_USB_CDC_NCM_H */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 75efc45eaa2f..b8aba196f7f1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -73,6 +73,7 @@ struct giveback_urb_bh {
73 spinlock_t lock; 73 spinlock_t lock;
74 struct list_head head; 74 struct list_head head;
75 struct tasklet_struct bh; 75 struct tasklet_struct bh;
76 struct usb_host_endpoint *completing_ep;
76}; 77};
77 78
78struct usb_hcd { 79struct usb_hcd {
@@ -140,6 +141,7 @@ struct usb_hcd {
140 unsigned wireless:1; /* Wireless USB HCD */ 141 unsigned wireless:1; /* Wireless USB HCD */
141 unsigned authorized_default:1; 142 unsigned authorized_default:1;
142 unsigned has_tt:1; /* Integrated TT in root hub */ 143 unsigned has_tt:1; /* Integrated TT in root hub */
144 unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
143 145
144 unsigned int irq; /* irq allocated */ 146 unsigned int irq; /* irq allocated */
145 void __iomem *regs; /* device memory/io */ 147 void __iomem *regs; /* device memory/io */
@@ -378,6 +380,12 @@ static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
378 return hcd->driver->flags & HCD_BH; 380 return hcd->driver->flags & HCD_BH;
379} 381}
380 382
383static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
384 struct usb_host_endpoint *ep)
385{
386 return hcd->high_prio_bh.completing_ep == ep;
387}
388
381extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 389extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
382extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, 390extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
383 int status); 391 int status);
@@ -428,6 +436,8 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
428extern void usb_hcd_pci_remove(struct pci_dev *dev); 436extern void usb_hcd_pci_remove(struct pci_dev *dev);
429extern void usb_hcd_pci_shutdown(struct pci_dev *dev); 437extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
430 438
439extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
440
431#ifdef CONFIG_PM 441#ifdef CONFIG_PM
432extern const struct dev_pm_ops usb_hcd_pci_pm_ops; 442extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
433#endif 443#endif
@@ -496,6 +506,7 @@ struct usb_tt {
496 struct usb_device *hub; /* upstream highspeed hub */ 506 struct usb_device *hub; /* upstream highspeed hub */
497 int multi; /* true means one TT per port */ 507 int multi; /* true means one TT per port */
498 unsigned think_time; /* think time in ns */ 508 unsigned think_time; /* think time in ns */
509 void *hcpriv; /* HCD private data */
499 510
500 /* for control/bulk error recovery (CLEAR_TT_BUFFER) */ 511 /* for control/bulk error recovery (CLEAR_TT_BUFFER) */
501 spinlock_t lock; 512 spinlock_t lock;
@@ -554,9 +565,8 @@ extern void usb_ep0_reinit(struct usb_device *);
554 * of (7/6 * 8 * bytecount) = 9.33 * bytecount */ 565 * of (7/6 * 8 * bytecount) = 9.33 * bytecount */
555 /* bytecount = data payload byte count */ 566 /* bytecount = data payload byte count */
556 567
557#define NS_TO_US(ns) ((ns + 500L) / 1000L) 568#define NS_TO_US(ns) DIV_ROUND_UP(ns, 1000L)
558 /* convert & round nanoseconds to microseconds */ 569 /* convert nanoseconds to microseconds, rounding up */
559
560 570
561/* 571/*
562 * Full/low speed bandwidth allocation constants/support. 572 * Full/low speed bandwidth allocation constants/support.
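
A worked comparison of the NS_TO_US() change above (illustrative arithmetic only):

/*
 *   old: NS_TO_US(ns) = (ns + 500L) / 1000L     -- round to nearest us
 *   new: NS_TO_US(ns) = DIV_ROUND_UP(ns, 1000L) -- always round up
 *
 *   ns = 1    -> old: 0 us   new: 1 us
 *   ns = 500  -> old: 1 us   new: 1 us
 *   ns = 2400 -> old: 2 us   new: 3 us
 *
 * Rounding up means a nonzero transfer cost can never be accounted as
 * zero microseconds of periodic bus time.
 */
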
diff --git a/include/linux/usb/intel_mid_otg.h b/include/linux/usb/intel_mid_otg.h
deleted file mode 100644
index 756cf5543ffd..000000000000
--- a/include/linux/usb/intel_mid_otg.h
+++ /dev/null
@@ -1,180 +0,0 @@
1/*
2 * Intel MID (Langwell/Penwell) USB OTG Transceiver driver
3 * Copyright (C) 2008 - 2010, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#ifndef __INTEL_MID_OTG_H
21#define __INTEL_MID_OTG_H
22
23#include <linux/pm.h>
24#include <linux/usb/otg.h>
25#include <linux/notifier.h>
26
27struct intel_mid_otg_xceiv;
28
29/* This is a common data structure for Intel MID platform to
30 * save values of the OTG state machine */
31struct otg_hsm {
32 /* Input */
33 int a_bus_resume;
34 int a_bus_suspend;
35 int a_conn;
36 int a_sess_vld;
37 int a_srp_det;
38 int a_vbus_vld;
39 int b_bus_resume;
40 int b_bus_suspend;
41 int b_conn;
42 int b_se0_srp;
43 int b_ssend_srp;
44 int b_sess_end;
45 int b_sess_vld;
46 int id;
47/* id values */
48#define ID_B 0x05
49#define ID_A 0x04
50#define ID_ACA_C 0x03
51#define ID_ACA_B 0x02
52#define ID_ACA_A 0x01
53 int power_up;
54 int adp_change;
55 int test_device;
56
57 /* Internal variables */
58 int a_set_b_hnp_en;
59 int b_srp_done;
60 int b_hnp_enable;
61 int hnp_poll_enable;
62
63 /* Timeout indicator for timers */
64 int a_wait_vrise_tmout;
65 int a_wait_bcon_tmout;
66 int a_aidl_bdis_tmout;
67 int a_bidl_adis_tmout;
68 int a_bidl_adis_tmr;
69 int a_wait_vfall_tmout;
70 int b_ase0_brst_tmout;
71 int b_bus_suspend_tmout;
72 int b_srp_init_tmout;
73 int b_srp_fail_tmout;
74 int b_srp_fail_tmr;
75 int b_adp_sense_tmout;
76
77 /* Informative variables */
78 int a_bus_drop;
79 int a_bus_req;
80 int a_clr_err;
81 int b_bus_req;
82 int a_suspend_req;
83 int b_bus_suspend_vld;
84
85 /* Output */
86 int drv_vbus;
87 int loc_conn;
88 int loc_sof;
89
90 /* Others */
91 int vbus_srp_up;
92};
93
94/* must provide ULPI access function to read/write registers implemented in
95 * ULPI address space */
96struct iotg_ulpi_access_ops {
97 int (*read)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val);
98 int (*write)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val);
99};
100
101#define OTG_A_DEVICE 0x0
102#define OTG_B_DEVICE 0x1
103
104/*
105 * the Intel MID (Langwell/Penwell) otg transceiver driver needs to interact
106 * with device and host drivers to implement the USB OTG related feature. More
107 * function members are added based on usb_phy data structure for this
108 * purpose.
109 */
110struct intel_mid_otg_xceiv {
111 struct usb_phy otg;
112 struct otg_hsm hsm;
113
114 /* base address */
115 void __iomem *base;
116
117 /* ops to access ulpi */
118 struct iotg_ulpi_access_ops ulpi_ops;
119
120 /* atomic notifier for interrupt context */
121 struct atomic_notifier_head iotg_notifier;
122
123 /* start/stop USB Host function */
124 int (*start_host)(struct intel_mid_otg_xceiv *iotg);
125 int (*stop_host)(struct intel_mid_otg_xceiv *iotg);
126
127 /* start/stop USB Peripheral function */
128 int (*start_peripheral)(struct intel_mid_otg_xceiv *iotg);
129 int (*stop_peripheral)(struct intel_mid_otg_xceiv *iotg);
130
131 /* start/stop ADP sense/probe function */
132 int (*set_adp_probe)(struct intel_mid_otg_xceiv *iotg,
133 bool enabled, int dev);
134 int (*set_adp_sense)(struct intel_mid_otg_xceiv *iotg,
135 bool enabled);
136
137#ifdef CONFIG_PM
138 /* suspend/resume USB host function */
139 int (*suspend_host)(struct intel_mid_otg_xceiv *iotg,
140 pm_message_t message);
141 int (*resume_host)(struct intel_mid_otg_xceiv *iotg);
142
143 int (*suspend_peripheral)(struct intel_mid_otg_xceiv *iotg,
144 pm_message_t message);
145 int (*resume_peripheral)(struct intel_mid_otg_xceiv *iotg);
146#endif
147
148};
149static inline
150struct intel_mid_otg_xceiv *otg_to_mid_xceiv(struct usb_phy *otg)
151{
152 return container_of(otg, struct intel_mid_otg_xceiv, otg);
153}
154
155#define MID_OTG_NOTIFY_CONNECT 0x0001
156#define MID_OTG_NOTIFY_DISCONN 0x0002
157#define MID_OTG_NOTIFY_HSUSPEND 0x0003
158#define MID_OTG_NOTIFY_HRESUME 0x0004
159#define MID_OTG_NOTIFY_CSUSPEND 0x0005
160#define MID_OTG_NOTIFY_CRESUME 0x0006
161#define MID_OTG_NOTIFY_HOSTADD 0x0007
162#define MID_OTG_NOTIFY_HOSTREMOVE 0x0008
163#define MID_OTG_NOTIFY_CLIENTADD 0x0009
164#define MID_OTG_NOTIFY_CLIENTREMOVE 0x000a
165
166static inline int
167intel_mid_otg_register_notifier(struct intel_mid_otg_xceiv *iotg,
168 struct notifier_block *nb)
169{
170 return atomic_notifier_chain_register(&iotg->iotg_notifier, nb);
171}
172
173static inline void
174intel_mid_otg_unregister_notifier(struct intel_mid_otg_xceiv *iotg,
175 struct notifier_block *nb)
176{
177 atomic_notifier_chain_unregister(&iotg->iotg_notifier, nb);
178}
179
180#endif /* __INTEL_MID_OTG_H */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 053c26841cc3..eb505250940a 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -99,8 +99,6 @@ struct musb_hdrc_platform_data {
99 /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ 99 /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
100 u8 mode; 100 u8 mode;
101 101
102 u8 has_mailbox:1;
103
104 /* for clk_get() */ 102 /* for clk_get() */
105 const char *clock; 103 const char *clock;
106 104
diff --git a/include/linux/usb/omap_control_usb.h b/include/linux/usb/omap_control_usb.h
index 27b5b8c931b0..596b01918813 100644
--- a/include/linux/usb/omap_control_usb.h
+++ b/include/linux/usb/omap_control_usb.h
@@ -19,20 +19,23 @@
19#ifndef __OMAP_CONTROL_USB_H__ 19#ifndef __OMAP_CONTROL_USB_H__
20#define __OMAP_CONTROL_USB_H__ 20#define __OMAP_CONTROL_USB_H__
21 21
22enum omap_control_usb_type {
23 OMAP_CTRL_TYPE_OTGHS = 1, /* Mailbox OTGHS_CONTROL */
24 OMAP_CTRL_TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */
 25 OMAP_CTRL_TYPE_PIPE3, /* PIPE3 PHY, DPLL & separate Rx/Tx power */
26 OMAP_CTRL_TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
27};
28
22struct omap_control_usb { 29struct omap_control_usb {
23 struct device *dev; 30 struct device *dev;
24 31
25 u32 __iomem *dev_conf;
26 u32 __iomem *otghs_control; 32 u32 __iomem *otghs_control;
27 u32 __iomem *phy_power; 33 u32 __iomem *power;
34 u32 __iomem *power_aux;
28 35
29 struct clk *sys_clk; 36 struct clk *sys_clk;
30 37
31 u32 type; 38 enum omap_control_usb_type type;
32};
33
34struct omap_control_usb_platform_data {
35 u8 type;
36}; 39};
37 40
38enum omap_control_usb_mode { 41enum omap_control_usb_mode {
@@ -42,10 +45,6 @@ enum omap_control_usb_mode {
42 USB_MODE_DISCONNECT, 45 USB_MODE_DISCONNECT,
43}; 46};
44 47
45/* To differentiate ctrl module IP having either mailbox or USB3 PHY power */
46#define OMAP_CTRL_DEV_TYPE1 0x1
47#define OMAP_CTRL_DEV_TYPE2 0x2
48
49#define OMAP_CTRL_DEV_PHY_PD BIT(0) 48#define OMAP_CTRL_DEV_PHY_PD BIT(0)
50 49
51#define OMAP_CTRL_DEV_AVALID BIT(0) 50#define OMAP_CTRL_DEV_AVALID BIT(0)
@@ -63,26 +62,18 @@ enum omap_control_usb_mode {
63#define OMAP_CTRL_USB3_PHY_TX_RX_POWERON 0x3 62#define OMAP_CTRL_USB3_PHY_TX_RX_POWERON 0x3
64#define OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF 0x0 63#define OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF 0x0
65 64
65#define OMAP_CTRL_USB2_PHY_PD BIT(28)
66
66#if IS_ENABLED(CONFIG_OMAP_CONTROL_USB) 67#if IS_ENABLED(CONFIG_OMAP_CONTROL_USB)
67extern struct device *omap_get_control_dev(void);
68extern void omap_control_usb_phy_power(struct device *dev, int on); 68extern void omap_control_usb_phy_power(struct device *dev, int on);
69extern void omap_control_usb3_phy_power(struct device *dev, bool on);
70extern void omap_control_usb_set_mode(struct device *dev, 69extern void omap_control_usb_set_mode(struct device *dev,
71 enum omap_control_usb_mode mode); 70 enum omap_control_usb_mode mode);
72#else 71#else
73static inline struct device *omap_get_control_dev(void)
74{
75 return ERR_PTR(-ENODEV);
76}
77 72
78static inline void omap_control_usb_phy_power(struct device *dev, int on) 73static inline void omap_control_usb_phy_power(struct device *dev, int on)
79{ 74{
80} 75}
81 76
82static inline void omap_control_usb3_phy_power(struct device *dev, int on)
83{
84}
85
86static inline void omap_control_usb_set_mode(struct device *dev, 77static inline void omap_control_usb_set_mode(struct device *dev,
87 enum omap_control_usb_mode mode) 78 enum omap_control_usb_mode mode)
88{ 79{
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index d528b8045150..704a1ab8240c 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -320,6 +320,8 @@ extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
320extern void usb_serial_put(struct usb_serial *serial); 320extern void usb_serial_put(struct usb_serial *serial);
321extern int usb_serial_generic_open(struct tty_struct *tty, 321extern int usb_serial_generic_open(struct tty_struct *tty,
322 struct usb_serial_port *port); 322 struct usb_serial_port *port);
323extern int usb_serial_generic_write_start(struct usb_serial_port *port,
324 gfp_t mem_flags);
323extern int usb_serial_generic_write(struct tty_struct *tty, 325extern int usb_serial_generic_write(struct tty_struct *tty,
324 struct usb_serial_port *port, const unsigned char *buf, int count); 326 struct usb_serial_port *port, const unsigned char *buf, int count);
325extern void usb_serial_generic_close(struct usb_serial_port *port); 327extern void usb_serial_generic_close(struct usb_serial_port *port);
diff --git a/include/linux/usb/usb_phy_gen_xceiv.h b/include/linux/usb/usb_phy_gen_xceiv.h
index 11d85b9c1b08..cc8d818a83be 100644
--- a/include/linux/usb/usb_phy_gen_xceiv.h
+++ b/include/linux/usb/usb_phy_gen_xceiv.h
@@ -9,7 +9,8 @@ struct usb_phy_gen_xceiv_platform_data {
9 9
10 /* if set fails with -EPROBE_DEFER if can't get regulator */ 10 /* if set fails with -EPROBE_DEFER if can't get regulator */
11 unsigned int needs_vcc:1; 11 unsigned int needs_vcc:1;
12 unsigned int needs_reset:1; 12 unsigned int needs_reset:1; /* deprecated */
13 int gpio_reset;
13}; 14};
14 15
15#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE)) 16#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
index 4ff744e2b678..c1257130769b 100644
--- a/include/linux/usb/wusb-wa.h
+++ b/include/linux/usb/wusb-wa.h
@@ -142,7 +142,7 @@ enum wa_notif_type {
142struct wa_notif_hdr { 142struct wa_notif_hdr {
143 u8 bLength; 143 u8 bLength;
144 u8 bNotifyType; /* enum wa_notif_type */ 144 u8 bNotifyType; /* enum wa_notif_type */
145} __attribute__((packed)); 145} __packed;
146 146
147/** 147/**
148 * HWA DN Received notification [(WUSB] section 8.5.4.2) 148 * HWA DN Received notification [(WUSB] section 8.5.4.2)
@@ -158,7 +158,7 @@ struct hwa_notif_dn {
158 u8 bSourceDeviceAddr; /* from errata 2005/07 */ 158 u8 bSourceDeviceAddr; /* from errata 2005/07 */
159 u8 bmAttributes; 159 u8 bmAttributes;
160 struct wusb_dn_hdr dndata[]; 160 struct wusb_dn_hdr dndata[];
161} __attribute__((packed)); 161} __packed;
162 162
163/* [WUSB] section 8.3.3 */ 163/* [WUSB] section 8.3.3 */
164enum wa_xfer_type { 164enum wa_xfer_type {
@@ -167,6 +167,8 @@ enum wa_xfer_type {
167 WA_XFER_TYPE_ISO = 0x82, 167 WA_XFER_TYPE_ISO = 0x82,
168 WA_XFER_RESULT = 0x83, 168 WA_XFER_RESULT = 0x83,
169 WA_XFER_ABORT = 0x84, 169 WA_XFER_ABORT = 0x84,
170 WA_XFER_ISO_PACKET_INFO = 0xA0,
171 WA_XFER_ISO_PACKET_STATUS = 0xA1,
170}; 172};
171 173
172/* [WUSB] section 8.3.3 */ 174/* [WUSB] section 8.3.3 */
@@ -177,28 +179,47 @@ struct wa_xfer_hdr {
177 __le32 dwTransferID; /* Host-assigned ID */ 179 __le32 dwTransferID; /* Host-assigned ID */
178 __le32 dwTransferLength; /* Length of data to xfer */ 180 __le32 dwTransferLength; /* Length of data to xfer */
179 u8 bTransferSegment; 181 u8 bTransferSegment;
180} __attribute__((packed)); 182} __packed;
181 183
182struct wa_xfer_ctl { 184struct wa_xfer_ctl {
183 struct wa_xfer_hdr hdr; 185 struct wa_xfer_hdr hdr;
184 u8 bmAttribute; 186 u8 bmAttribute;
185 __le16 wReserved; 187 __le16 wReserved;
186 struct usb_ctrlrequest baSetupData; 188 struct usb_ctrlrequest baSetupData;
187} __attribute__((packed)); 189} __packed;
188 190
189struct wa_xfer_bi { 191struct wa_xfer_bi {
190 struct wa_xfer_hdr hdr; 192 struct wa_xfer_hdr hdr;
191 u8 bReserved; 193 u8 bReserved;
192 __le16 wReserved; 194 __le16 wReserved;
193} __attribute__((packed)); 195} __packed;
194 196
197/* [WUSB] section 8.5.5 */
195struct wa_xfer_hwaiso { 198struct wa_xfer_hwaiso {
196 struct wa_xfer_hdr hdr; 199 struct wa_xfer_hdr hdr;
197 u8 bReserved; 200 u8 bReserved;
198 __le16 wPresentationTime; 201 __le16 wPresentationTime;
199 __le32 dwNumOfPackets; 202 __le32 dwNumOfPackets;
200 /* FIXME: u8 pktdata[]? */ 203} __packed;
201} __attribute__((packed)); 204
205struct wa_xfer_packet_info_hwaiso {
206 __le16 wLength;
207 u8 bPacketType;
208 u8 bReserved;
209 __le16 PacketLength[0];
210} __packed;
211
212struct wa_xfer_packet_status_len_hwaiso {
213 __le16 PacketLength;
214 __le16 PacketStatus;
215} __packed;
216
217struct wa_xfer_packet_status_hwaiso {
218 __le16 wLength;
219 u8 bPacketType;
220 u8 bReserved;
221 struct wa_xfer_packet_status_len_hwaiso PacketStatus[0];
222} __packed;
202 223
203/* [WUSB] section 8.3.3.5 */ 224/* [WUSB] section 8.3.3.5 */
204struct wa_xfer_abort { 225struct wa_xfer_abort {
@@ -206,7 +227,7 @@ struct wa_xfer_abort {
206 u8 bRequestType; 227 u8 bRequestType;
207 __le16 wRPipe; /* RPipe index */ 228 __le16 wRPipe; /* RPipe index */
208 __le32 dwTransferID; /* Host-assigned ID */ 229 __le32 dwTransferID; /* Host-assigned ID */
209} __attribute__((packed)); 230} __packed;
210 231
211/** 232/**
212 * WA Transfer Complete notification ([WUSB] section 8.3.3.3) 233 * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
@@ -216,7 +237,7 @@ struct wa_notif_xfer {
216 struct wa_notif_hdr hdr; 237 struct wa_notif_hdr hdr;
217 u8 bEndpoint; 238 u8 bEndpoint;
218 u8 Reserved; 239 u8 Reserved;
219} __attribute__((packed)); 240} __packed;
220 241
221/** Transfer result basic codes [WUSB] table 8-15 */ 242/** Transfer result basic codes [WUSB] table 8-15 */
222enum { 243enum {
@@ -243,7 +264,7 @@ struct wa_xfer_result {
243 u8 bTransferSegment; 264 u8 bTransferSegment;
244 u8 bTransferStatus; 265 u8 bTransferStatus;
245 __le32 dwNumOfPackets; 266 __le32 dwNumOfPackets;
246} __attribute__((packed)); 267} __packed;
247 268
248/** 269/**
249 * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). 270 * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
@@ -258,16 +279,16 @@ struct wa_xfer_result {
258struct usb_wa_descriptor { 279struct usb_wa_descriptor {
259 u8 bLength; 280 u8 bLength;
260 u8 bDescriptorType; 281 u8 bDescriptorType;
261 u16 bcdWAVersion; 282 __le16 bcdWAVersion;
262 u8 bNumPorts; /* don't use!! */ 283 u8 bNumPorts; /* don't use!! */
263 u8 bmAttributes; /* Reserved == 0 */ 284 u8 bmAttributes; /* Reserved == 0 */
264 u16 wNumRPipes; 285 __le16 wNumRPipes;
265 u16 wRPipeMaxBlock; 286 __le16 wRPipeMaxBlock;
266 u8 bRPipeBlockSize; 287 u8 bRPipeBlockSize;
267 u8 bPwrOn2PwrGood; 288 u8 bPwrOn2PwrGood;
268 u8 bNumMMCIEs; 289 u8 bNumMMCIEs;
269 u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ 290 u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
270} __attribute__((packed)); 291} __packed;
271 292
272/** 293/**
273 * HWA Device Information Buffer (WUSB1.0[T8.54]) 294 * HWA Device Information Buffer (WUSB1.0[T8.54])
@@ -277,6 +298,6 @@ struct hwa_dev_info {
277 u8 bDeviceAddress; 298 u8 bDeviceAddress;
278 __le16 wPHYRates; 299 __le16 wPHYRates;
279 u8 bmDeviceAttribute; 300 u8 bmDeviceAttribute;
280} __attribute__((packed)); 301} __packed;
281 302
282#endif /* #ifndef __LINUX_USB_WUSB_WA_H */ 303#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
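As a rough illustration (not part of the patch), the new HWA isochronous status block is variable-length: wLength covers the fixed header plus the trailing PacketStatus[] entries, so the entry count can be derived from it. The helper name below is hypothetical.

static unsigned int hwaiso_status_entries(const struct wa_xfer_packet_status_hwaiso *st)
{
        /* wLength is little-endian and includes the 4-byte fixed header. */
        u16 len = le16_to_cpu(st->wLength);

        if (len < sizeof(*st))
                return 0;       /* malformed: shorter than the header itself */
        return (len - sizeof(*st)) / sizeof(st->PacketStatus[0]);
}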
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 0c4d4ca370ec..eeb28329fa3c 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -271,6 +271,8 @@ static inline u8 wusb_key_index(int index, int type, int originator)
271#define WUSB_KEY_INDEX_TYPE_GTK 2 271#define WUSB_KEY_INDEX_TYPE_GTK 2
272#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 272#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
273#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 273#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
274/* bits 0-3 used for the key index. */
275#define WUSB_KEY_INDEX_MAX 15
274 276
275/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ 277/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
276struct aes_ccm_nonce { 278struct aes_ccm_nonce {
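A minimal sketch (not from the patch) of how the new bound might be used: only bits 0-3 of the key index byte carry the index, so a caller can validate against WUSB_KEY_INDEX_MAX before encoding with wusb_key_index(). The helper below is hypothetical.

static int example_encode_key_index(int index, int type, int originator, u8 *out)
{
        /* Bits 0-3 hold the index, so anything above 15 cannot be encoded. */
        if (index < 0 || index > WUSB_KEY_INDEX_MAX)
                return -EINVAL;
        *out = wusb_key_index(index, type, originator);
        return 0;
}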
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 4db29859464f..4836ba3c1cd8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -27,6 +27,12 @@ struct user_namespace {
27 kuid_t owner; 27 kuid_t owner;
28 kgid_t group; 28 kgid_t group;
29 unsigned int proc_inum; 29 unsigned int proc_inum;
30
31 /* Register of per-UID persistent keyrings for this namespace */
32#ifdef CONFIG_PERSISTENT_KEYRINGS
33 struct key *persistent_keyring_register;
34 struct rw_semaphore persistent_keyring_register_sem;
35#endif
30}; 36};
31 37
32extern struct user_namespace init_user_ns; 38extern struct user_namespace init_user_ns;
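A small sketch, assuming nothing beyond the fields added here: accesses to the per-namespace register are expected to go through the new rw_semaphore, and builds without CONFIG_PERSISTENT_KEYRINGS must compile the members away. The helper below is hypothetical.

static bool persistent_register_populated(struct user_namespace *ns)
{
#ifdef CONFIG_PERSISTENT_KEYRINGS
        bool populated;

        down_read(&ns->persistent_keyring_register_sem);
        populated = ns->persistent_keyring_register != NULL;
        up_read(&ns->persistent_keyring_register_sem);
        return populated;
#else
        return false;
#endif
}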
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 36d36cc89329..e4abb84199be 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,11 +51,11 @@ int virtqueue_add_sgs(struct virtqueue *vq,
51 void *data, 51 void *data,
52 gfp_t gfp); 52 gfp_t gfp);
53 53
54void virtqueue_kick(struct virtqueue *vq); 54bool virtqueue_kick(struct virtqueue *vq);
55 55
56bool virtqueue_kick_prepare(struct virtqueue *vq); 56bool virtqueue_kick_prepare(struct virtqueue *vq);
57 57
58void virtqueue_notify(struct virtqueue *vq); 58bool virtqueue_notify(struct virtqueue *vq);
59 59
60void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); 60void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
61 61
@@ -73,6 +73,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
73 73
74unsigned int virtqueue_get_vring_size(struct virtqueue *vq); 74unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
75 75
76bool virtqueue_is_broken(struct virtqueue *vq);
77
76/** 78/**
77 * virtio_device - representation of a device using virtio 79 * virtio_device - representation of a device using virtio
78 * @index: unique position on the virtio bus 80 * @index: unique position on the virtio bus
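A brief usage sketch (not from the patch): with virtqueue_kick() and virtqueue_notify() now returning bool, and virtqueue_is_broken() exported, a driver can fail a submission immediately when the transport reports a dead device. The surrounding function and error policy are hypothetical.

static int example_submit(struct virtqueue *vq, struct scatterlist *sg,
                          unsigned int num, void *data)
{
        int err = virtqueue_add_outbuf(vq, sg, num, data, GFP_ATOMIC);

        if (err)
                return err;
        /* A false return now means the notification could not be delivered. */
        if (!virtqueue_kick(vq))
                return -EIO;
        if (virtqueue_is_broken(vq))
                return -EIO;
        return 0;
}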
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 29b9104232b4..e8f8f71e843c 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -96,33 +96,6 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
96 return test_bit(fbit, vdev->features); 96 return test_bit(fbit, vdev->features);
97} 97}
98 98
99/**
100 * virtio_config_val - look for a feature and get a virtio config entry.
101 * @vdev: the virtio device
102 * @fbit: the feature bit
103 * @offset: the type to search for.
104 * @v: a pointer to the value to fill in.
105 *
106 * The return value is -ENOENT if the feature doesn't exist. Otherwise
107 * the config value is copied into whatever is pointed to by v. */
108#define virtio_config_val(vdev, fbit, offset, v) \
109 virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v))
110
111#define virtio_config_val_len(vdev, fbit, offset, v, len) \
112 virtio_config_buf((vdev), (fbit), (offset), (v), (len))
113
114static inline int virtio_config_buf(struct virtio_device *vdev,
115 unsigned int fbit,
116 unsigned int offset,
117 void *buf, unsigned len)
118{
119 if (!virtio_has_feature(vdev, fbit))
120 return -ENOENT;
121
122 vdev->config->get(vdev, offset, buf, len);
123 return 0;
124}
125
126static inline 99static inline
127struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, 100struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
128 vq_callback_t *c, const char *n) 101 vq_callback_t *c, const char *n)
@@ -162,5 +135,139 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
162 return 0; 135 return 0;
163} 136}
164 137
138/* Config space accessors. */
139#define virtio_cread(vdev, structname, member, ptr) \
140 do { \
141 /* Must match the member's type, and be integer */ \
142 if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
143 (*ptr) = 1; \
144 \
145 switch (sizeof(*ptr)) { \
146 case 1: \
147 *(ptr) = virtio_cread8(vdev, \
148 offsetof(structname, member)); \
149 break; \
150 case 2: \
151 *(ptr) = virtio_cread16(vdev, \
152 offsetof(structname, member)); \
153 break; \
154 case 4: \
155 *(ptr) = virtio_cread32(vdev, \
156 offsetof(structname, member)); \
157 break; \
158 case 8: \
159 *(ptr) = virtio_cread64(vdev, \
160 offsetof(structname, member)); \
161 break; \
162 default: \
163 BUG(); \
164 } \
165 } while(0)
166
167/* Config space accessors. */
168#define virtio_cwrite(vdev, structname, member, ptr) \
169 do { \
170 /* Must match the member's type, and be integer */ \
171 if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
172 BUG_ON((*ptr) == 1); \
173 \
174 switch (sizeof(*ptr)) { \
175 case 1: \
176 virtio_cwrite8(vdev, \
177 offsetof(structname, member), \
178 *(ptr)); \
179 break; \
180 case 2: \
181 virtio_cwrite16(vdev, \
182 offsetof(structname, member), \
183 *(ptr)); \
184 break; \
185 case 4: \
186 virtio_cwrite32(vdev, \
187 offsetof(structname, member), \
188 *(ptr)); \
189 break; \
190 case 8: \
191 virtio_cwrite64(vdev, \
192 offsetof(structname, member), \
193 *(ptr)); \
194 break; \
195 default: \
196 BUG(); \
197 } \
198 } while(0)
199
200static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
201{
202 u8 ret;
203 vdev->config->get(vdev, offset, &ret, sizeof(ret));
204 return ret;
205}
206
207static inline void virtio_cread_bytes(struct virtio_device *vdev,
208 unsigned int offset,
209 void *buf, size_t len)
210{
211 vdev->config->get(vdev, offset, buf, len);
212}
213
214static inline void virtio_cwrite8(struct virtio_device *vdev,
215 unsigned int offset, u8 val)
216{
217 vdev->config->set(vdev, offset, &val, sizeof(val));
218}
219
220static inline u16 virtio_cread16(struct virtio_device *vdev,
221 unsigned int offset)
222{
223 u16 ret;
224 vdev->config->get(vdev, offset, &ret, sizeof(ret));
225 return ret;
226}
227
228static inline void virtio_cwrite16(struct virtio_device *vdev,
229 unsigned int offset, u16 val)
230{
231 vdev->config->set(vdev, offset, &val, sizeof(val));
232}
233
234static inline u32 virtio_cread32(struct virtio_device *vdev,
235 unsigned int offset)
236{
237 u32 ret;
238 vdev->config->get(vdev, offset, &ret, sizeof(ret));
239 return ret;
240}
241
242static inline void virtio_cwrite32(struct virtio_device *vdev,
243 unsigned int offset, u32 val)
244{
245 vdev->config->set(vdev, offset, &val, sizeof(val));
246}
247
248static inline u64 virtio_cread64(struct virtio_device *vdev,
249 unsigned int offset)
250{
251 u64 ret;
252 vdev->config->get(vdev, offset, &ret, sizeof(ret));
253 return ret;
254}
255
256static inline void virtio_cwrite64(struct virtio_device *vdev,
257 unsigned int offset, u64 val)
258{
259 vdev->config->set(vdev, offset, &val, sizeof(val));
260}
261
262/* Conditional config space accessors. */
263#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
264 ({ \
265 int _r = 0; \
266 if (!virtio_has_feature(vdev, fbit)) \
267 _r = -ENOENT; \
268 else \
269 virtio_cread((vdev), structname, member, ptr); \
270 _r; \
271 })
165 272
166#endif /* _LINUX_VIRTIO_CONFIG_H */ 273#endif /* _LINUX_VIRTIO_CONFIG_H */
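A short usage sketch (not from the patch) of the new accessors, borrowing struct virtio_blk_config purely as a familiar layout; any device config space works the same way.

static void example_read_config(struct virtio_device *vdev)
{
        u64 capacity;
        u32 blk_size;

        /* Plain field: always present in config space, type-checked by the macro. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* Feature-gated field: -ENOENT if the feature was not negotiated. */
        if (virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                 struct virtio_blk_config, blk_size,
                                 &blk_size) < 0)
                blk_size = 512; /* fall back to a default sector size */
}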
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index b300787af8e0..67e06fe18c03 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -71,7 +71,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
71 struct virtio_device *vdev, 71 struct virtio_device *vdev,
72 bool weak_barriers, 72 bool weak_barriers,
73 void *pages, 73 void *pages,
74 void (*notify)(struct virtqueue *vq), 74 bool (*notify)(struct virtqueue *vq),
75 void (*callback)(struct virtqueue *vq), 75 void (*callback)(struct virtqueue *vq),
76 const char *name); 76 const char *name);
77void vring_del_virtqueue(struct virtqueue *vq); 77void vring_del_virtqueue(struct virtqueue *vq);
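A sketch only (not from the patch): the notify hook handed to vring_new_virtqueue() can now report failure, which the core uses to treat the queue as broken. The device structure and doorbell register below are hypothetical.

static bool example_notify(struct virtqueue *vq)
{
        struct example_dev *edev = vq->vdev->priv;

        if (!edev->alive)
                return false;   /* tell the core the kick could not be delivered */
        writel(vq->index, edev->doorbell);      /* hypothetical doorbell register */
        return true;
}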
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 1855f0a22add..c557c6d096de 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -39,6 +39,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
39 PAGEOUTRUN, ALLOCSTALL, PGROTATED, 39 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
40#ifdef CONFIG_NUMA_BALANCING 40#ifdef CONFIG_NUMA_BALANCING
41 NUMA_PTE_UPDATES, 41 NUMA_PTE_UPDATES,
42 NUMA_HUGE_PTE_UPDATES,
42 NUMA_HINT_FAULTS, 43 NUMA_HINT_FAULTS,
43 NUMA_HINT_FAULTS_LOCAL, 44 NUMA_HINT_FAULTS_LOCAL,
44 NUMA_PAGE_MIGRATE, 45 NUMA_PAGE_MIGRATE,
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a67fc1635592..eaa00b10abaa 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1,7 +1,8 @@
1#ifndef _LINUX_WAIT_H 1#ifndef _LINUX_WAIT_H
2#define _LINUX_WAIT_H 2#define _LINUX_WAIT_H
3 3/*
4 4 * Linux wait queue related types and methods
5 */
5#include <linux/list.h> 6#include <linux/list.h>
6#include <linux/stddef.h> 7#include <linux/stddef.h>
7#include <linux/spinlock.h> 8#include <linux/spinlock.h>
@@ -13,27 +14,27 @@ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, v
13int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); 14int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
14 15
15struct __wait_queue { 16struct __wait_queue {
16 unsigned int flags; 17 unsigned int flags;
17#define WQ_FLAG_EXCLUSIVE 0x01 18#define WQ_FLAG_EXCLUSIVE 0x01
18 void *private; 19 void *private;
19 wait_queue_func_t func; 20 wait_queue_func_t func;
20 struct list_head task_list; 21 struct list_head task_list;
21}; 22};
22 23
23struct wait_bit_key { 24struct wait_bit_key {
24 void *flags; 25 void *flags;
25 int bit_nr; 26 int bit_nr;
26#define WAIT_ATOMIC_T_BIT_NR -1 27#define WAIT_ATOMIC_T_BIT_NR -1
27}; 28};
28 29
29struct wait_bit_queue { 30struct wait_bit_queue {
30 struct wait_bit_key key; 31 struct wait_bit_key key;
31 wait_queue_t wait; 32 wait_queue_t wait;
32}; 33};
33 34
34struct __wait_queue_head { 35struct __wait_queue_head {
35 spinlock_t lock; 36 spinlock_t lock;
36 struct list_head task_list; 37 struct list_head task_list;
37}; 38};
38typedef struct __wait_queue_head wait_queue_head_t; 39typedef struct __wait_queue_head wait_queue_head_t;
39 40
@@ -84,17 +85,17 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct
84 85
85static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) 86static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
86{ 87{
87 q->flags = 0; 88 q->flags = 0;
88 q->private = p; 89 q->private = p;
89 q->func = default_wake_function; 90 q->func = default_wake_function;
90} 91}
91 92
92static inline void init_waitqueue_func_entry(wait_queue_t *q, 93static inline void
93 wait_queue_func_t func) 94init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
94{ 95{
95 q->flags = 0; 96 q->flags = 0;
96 q->private = NULL; 97 q->private = NULL;
97 q->func = func; 98 q->func = func;
98} 99}
99 100
100static inline int waitqueue_active(wait_queue_head_t *q) 101static inline int waitqueue_active(wait_queue_head_t *q)
@@ -114,8 +115,8 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
114/* 115/*
115 * Used for wake-one threads: 116 * Used for wake-one threads:
116 */ 117 */
117static inline void __add_wait_queue_exclusive(wait_queue_head_t *q, 118static inline void
118 wait_queue_t *wait) 119__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
119{ 120{
120 wait->flags |= WQ_FLAG_EXCLUSIVE; 121 wait->flags |= WQ_FLAG_EXCLUSIVE;
121 __add_wait_queue(q, wait); 122 __add_wait_queue(q, wait);
@@ -127,23 +128,22 @@ static inline void __add_wait_queue_tail(wait_queue_head_t *head,
127 list_add_tail(&new->task_list, &head->task_list); 128 list_add_tail(&new->task_list, &head->task_list);
128} 129}
129 130
130static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q, 131static inline void
131 wait_queue_t *wait) 132__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
132{ 133{
133 wait->flags |= WQ_FLAG_EXCLUSIVE; 134 wait->flags |= WQ_FLAG_EXCLUSIVE;
134 __add_wait_queue_tail(q, wait); 135 __add_wait_queue_tail(q, wait);
135} 136}
136 137
137static inline void __remove_wait_queue(wait_queue_head_t *head, 138static inline void
138 wait_queue_t *old) 139__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
139{ 140{
140 list_del(&old->task_list); 141 list_del(&old->task_list);
141} 142}
142 143
143void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 144void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
144void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); 145void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
145void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, 146void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
146 void *key);
147void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); 147void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
148void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 148void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
149void __wake_up_bit(wait_queue_head_t *, void *, int); 149void __wake_up_bit(wait_queue_head_t *, void *, int);
@@ -170,27 +170,64 @@ wait_queue_head_t *bit_waitqueue(void *, int);
170/* 170/*
171 * Wakeup macros to be used to report events to the targets. 171 * Wakeup macros to be used to report events to the targets.
172 */ 172 */
173#define wake_up_poll(x, m) \ 173#define wake_up_poll(x, m) \
174 __wake_up(x, TASK_NORMAL, 1, (void *) (m)) 174 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
175#define wake_up_locked_poll(x, m) \ 175#define wake_up_locked_poll(x, m) \
176 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) 176 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
177#define wake_up_interruptible_poll(x, m) \ 177#define wake_up_interruptible_poll(x, m) \
178 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) 178 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
179#define wake_up_interruptible_sync_poll(x, m) \ 179#define wake_up_interruptible_sync_poll(x, m) \
180 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) 180 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
181 181
182#define __wait_event(wq, condition) \ 182#define ___wait_cond_timeout(condition) \
183do { \ 183({ \
184 DEFINE_WAIT(__wait); \ 184 bool __cond = (condition); \
185 if (__cond && !__ret) \
186 __ret = 1; \
187 __cond || !__ret; \
188})
189
190#define ___wait_is_interruptible(state) \
191 (!__builtin_constant_p(state) || \
192 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
193
194#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
195({ \
196 __label__ __out; \
197 wait_queue_t __wait; \
198 long __ret = ret; \
199 \
200 INIT_LIST_HEAD(&__wait.task_list); \
201 if (exclusive) \
202 __wait.flags = WQ_FLAG_EXCLUSIVE; \
203 else \
204 __wait.flags = 0; \
185 \ 205 \
186 for (;;) { \ 206 for (;;) { \
187 prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ 207 long __int = prepare_to_wait_event(&wq, &__wait, state);\
208 \
188 if (condition) \ 209 if (condition) \
189 break; \ 210 break; \
190 schedule(); \ 211 \
212 if (___wait_is_interruptible(state) && __int) { \
213 __ret = __int; \
214 if (exclusive) { \
215 abort_exclusive_wait(&wq, &__wait, \
216 state, NULL); \
217 goto __out; \
218 } \
219 break; \
220 } \
221 \
222 cmd; \
191 } \ 223 } \
192 finish_wait(&wq, &__wait); \ 224 finish_wait(&wq, &__wait); \
193} while (0) 225__out: __ret; \
226})
227
228#define __wait_event(wq, condition) \
229 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
230 schedule())
194 231
195/** 232/**
196 * wait_event - sleep until a condition gets true 233 * wait_event - sleep until a condition gets true
@@ -204,29 +241,17 @@ do { \
204 * wake_up() has to be called after changing any variable that could 241 * wake_up() has to be called after changing any variable that could
205 * change the result of the wait condition. 242 * change the result of the wait condition.
206 */ 243 */
207#define wait_event(wq, condition) \ 244#define wait_event(wq, condition) \
208do { \ 245do { \
209 if (condition) \ 246 if (condition) \
210 break; \ 247 break; \
211 __wait_event(wq, condition); \ 248 __wait_event(wq, condition); \
212} while (0) 249} while (0)
213 250
214#define __wait_event_timeout(wq, condition, ret) \ 251#define __wait_event_timeout(wq, condition, timeout) \
215do { \ 252 ___wait_event(wq, ___wait_cond_timeout(condition), \
216 DEFINE_WAIT(__wait); \ 253 TASK_UNINTERRUPTIBLE, 0, timeout, \
217 \ 254 __ret = schedule_timeout(__ret))
218 for (;;) { \
219 prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
220 if (condition) \
221 break; \
222 ret = schedule_timeout(ret); \
223 if (!ret) \
224 break; \
225 } \
226 if (!ret && (condition)) \
227 ret = 1; \
228 finish_wait(&wq, &__wait); \
229} while (0)
230 255
231/** 256/**
232 * wait_event_timeout - sleep until a condition gets true or a timeout elapses 257 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
@@ -248,29 +273,40 @@ do { \
248#define wait_event_timeout(wq, condition, timeout) \ 273#define wait_event_timeout(wq, condition, timeout) \
249({ \ 274({ \
250 long __ret = timeout; \ 275 long __ret = timeout; \
251 if (!(condition)) \ 276 if (!___wait_cond_timeout(condition)) \
252 __wait_event_timeout(wq, condition, __ret); \ 277 __ret = __wait_event_timeout(wq, condition, timeout); \
253 __ret; \ 278 __ret; \
254}) 279})
255 280
256#define __wait_event_interruptible(wq, condition, ret) \ 281#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
282 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
283 cmd1; schedule(); cmd2)
284
285/**
286 * wait_event_cmd - sleep until a condition gets true
287 * @wq: the waitqueue to wait on
288 * @condition: a C expression for the event to wait for
289 * cmd1: the command will be executed before sleep
290 * cmd2: the command will be executed after sleep
291 *
292 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
293 * @condition evaluates to true. The @condition is checked each time
294 * the waitqueue @wq is woken up.
295 *
296 * wake_up() has to be called after changing any variable that could
297 * change the result of the wait condition.
298 */
299#define wait_event_cmd(wq, condition, cmd1, cmd2) \
257do { \ 300do { \
258 DEFINE_WAIT(__wait); \ 301 if (condition) \
259 \
260 for (;;) { \
261 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
262 if (condition) \
263 break; \
264 if (!signal_pending(current)) { \
265 schedule(); \
266 continue; \
267 } \
268 ret = -ERESTARTSYS; \
269 break; \ 302 break; \
270 } \ 303 __wait_event_cmd(wq, condition, cmd1, cmd2); \
271 finish_wait(&wq, &__wait); \
272} while (0) 304} while (0)
273 305
306#define __wait_event_interruptible(wq, condition) \
307 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
308 schedule())
309
274/** 310/**
275 * wait_event_interruptible - sleep until a condition gets true 311 * wait_event_interruptible - sleep until a condition gets true
276 * @wq: the waitqueue to wait on 312 * @wq: the waitqueue to wait on
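A usage sketch for wait_event_cmd() (not part of the patch): its typical caller drops a lock before sleeping and retakes it afterwards, so the condition is always re-evaluated with the lock held. The context structure here is hypothetical.

static void example_take_slot(struct example_ctx *ctx)
{
        spin_lock_irq(&ctx->lock);
        wait_event_cmd(ctx->wq, ctx->free_slots > 0,
                       spin_unlock_irq(&ctx->lock),     /* cmd1: run before sleeping */
                       spin_lock_irq(&ctx->lock));      /* cmd2: run after waking */
        ctx->free_slots--;
        spin_unlock_irq(&ctx->lock);
}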
@@ -290,31 +326,14 @@ do { \
290({ \ 326({ \
291 int __ret = 0; \ 327 int __ret = 0; \
292 if (!(condition)) \ 328 if (!(condition)) \
293 __wait_event_interruptible(wq, condition, __ret); \ 329 __ret = __wait_event_interruptible(wq, condition); \
294 __ret; \ 330 __ret; \
295}) 331})
296 332
297#define __wait_event_interruptible_timeout(wq, condition, ret) \ 333#define __wait_event_interruptible_timeout(wq, condition, timeout) \
298do { \ 334 ___wait_event(wq, ___wait_cond_timeout(condition), \
299 DEFINE_WAIT(__wait); \ 335 TASK_INTERRUPTIBLE, 0, timeout, \
300 \ 336 __ret = schedule_timeout(__ret))
301 for (;;) { \
302 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
303 if (condition) \
304 break; \
305 if (!signal_pending(current)) { \
306 ret = schedule_timeout(ret); \
307 if (!ret) \
308 break; \
309 continue; \
310 } \
311 ret = -ERESTARTSYS; \
312 break; \
313 } \
314 if (!ret && (condition)) \
315 ret = 1; \
316 finish_wait(&wq, &__wait); \
317} while (0)
318 337
319/** 338/**
320 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses 339 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
@@ -337,15 +356,15 @@ do { \
337#define wait_event_interruptible_timeout(wq, condition, timeout) \ 356#define wait_event_interruptible_timeout(wq, condition, timeout) \
338({ \ 357({ \
339 long __ret = timeout; \ 358 long __ret = timeout; \
340 if (!(condition)) \ 359 if (!___wait_cond_timeout(condition)) \
341 __wait_event_interruptible_timeout(wq, condition, __ret); \ 360 __ret = __wait_event_interruptible_timeout(wq, \
361 condition, timeout); \
342 __ret; \ 362 __ret; \
343}) 363})
344 364
345#define __wait_event_hrtimeout(wq, condition, timeout, state) \ 365#define __wait_event_hrtimeout(wq, condition, timeout, state) \
346({ \ 366({ \
347 int __ret = 0; \ 367 int __ret = 0; \
348 DEFINE_WAIT(__wait); \
349 struct hrtimer_sleeper __t; \ 368 struct hrtimer_sleeper __t; \
350 \ 369 \
351 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ 370 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
@@ -356,25 +375,15 @@ do { \
356 current->timer_slack_ns, \ 375 current->timer_slack_ns, \
357 HRTIMER_MODE_REL); \ 376 HRTIMER_MODE_REL); \
358 \ 377 \
359 for (;;) { \ 378 __ret = ___wait_event(wq, condition, state, 0, 0, \
360 prepare_to_wait(&wq, &__wait, state); \
361 if (condition) \
362 break; \
363 if (state == TASK_INTERRUPTIBLE && \
364 signal_pending(current)) { \
365 __ret = -ERESTARTSYS; \
366 break; \
367 } \
368 if (!__t.task) { \ 379 if (!__t.task) { \
369 __ret = -ETIME; \ 380 __ret = -ETIME; \
370 break; \ 381 break; \
371 } \ 382 } \
372 schedule(); \ 383 schedule()); \
373 } \
374 \ 384 \
375 hrtimer_cancel(&__t.timer); \ 385 hrtimer_cancel(&__t.timer); \
376 destroy_hrtimer_on_stack(&__t.timer); \ 386 destroy_hrtimer_on_stack(&__t.timer); \
377 finish_wait(&wq, &__wait); \
378 __ret; \ 387 __ret; \
379}) 388})
380 389
@@ -428,33 +437,15 @@ do { \
428 __ret; \ 437 __ret; \
429}) 438})
430 439
431#define __wait_event_interruptible_exclusive(wq, condition, ret) \ 440#define __wait_event_interruptible_exclusive(wq, condition) \
432do { \ 441 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
433 DEFINE_WAIT(__wait); \ 442 schedule())
434 \
435 for (;;) { \
436 prepare_to_wait_exclusive(&wq, &__wait, \
437 TASK_INTERRUPTIBLE); \
438 if (condition) { \
439 finish_wait(&wq, &__wait); \
440 break; \
441 } \
442 if (!signal_pending(current)) { \
443 schedule(); \
444 continue; \
445 } \
446 ret = -ERESTARTSYS; \
447 abort_exclusive_wait(&wq, &__wait, \
448 TASK_INTERRUPTIBLE, NULL); \
449 break; \
450 } \
451} while (0)
452 443
453#define wait_event_interruptible_exclusive(wq, condition) \ 444#define wait_event_interruptible_exclusive(wq, condition) \
454({ \ 445({ \
455 int __ret = 0; \ 446 int __ret = 0; \
456 if (!(condition)) \ 447 if (!(condition)) \
457 __wait_event_interruptible_exclusive(wq, condition, __ret);\ 448 __ret = __wait_event_interruptible_exclusive(wq, condition);\
458 __ret; \ 449 __ret; \
459}) 450})
460 451
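To illustrate the new composition pattern (this variant does not exist in the patch), any combination of task state, exclusivity and per-loop command can now be expressed as a thin wrapper over ___wait_event(); a hypothetical exclusive, killable flavour would look like:

#define __wait_event_killable_exclusive(wq, condition)                  \
        ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, schedule())

#define wait_event_killable_exclusive(wq, condition)                    \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_killable_exclusive(wq, condition); \
        __ret;                                                          \
})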
@@ -606,24 +597,8 @@ do { \
606 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) 597 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
607 598
608 599
609 600#define __wait_event_killable(wq, condition) \
610#define __wait_event_killable(wq, condition, ret) \ 601 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
611do { \
612 DEFINE_WAIT(__wait); \
613 \
614 for (;;) { \
615 prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
616 if (condition) \
617 break; \
618 if (!fatal_signal_pending(current)) { \
619 schedule(); \
620 continue; \
621 } \
622 ret = -ERESTARTSYS; \
623 break; \
624 } \
625 finish_wait(&wq, &__wait); \
626} while (0)
627 602
628/** 603/**
629 * wait_event_killable - sleep until a condition gets true 604 * wait_event_killable - sleep until a condition gets true
@@ -644,26 +619,17 @@ do { \
644({ \ 619({ \
645 int __ret = 0; \ 620 int __ret = 0; \
646 if (!(condition)) \ 621 if (!(condition)) \
647 __wait_event_killable(wq, condition, __ret); \ 622 __ret = __wait_event_killable(wq, condition); \
648 __ret; \ 623 __ret; \
649}) 624})
650 625
651 626
652#define __wait_event_lock_irq(wq, condition, lock, cmd) \ 627#define __wait_event_lock_irq(wq, condition, lock, cmd) \
653do { \ 628 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
654 DEFINE_WAIT(__wait); \ 629 spin_unlock_irq(&lock); \
655 \ 630 cmd; \
656 for (;;) { \ 631 schedule(); \
657 prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ 632 spin_lock_irq(&lock))
658 if (condition) \
659 break; \
660 spin_unlock_irq(&lock); \
661 cmd; \
662 schedule(); \
663 spin_lock_irq(&lock); \
664 } \
665 finish_wait(&wq, &__wait); \
666} while (0)
667 633
668/** 634/**
669 * wait_event_lock_irq_cmd - sleep until a condition gets true. The 635 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
@@ -723,26 +689,12 @@ do { \
723} while (0) 689} while (0)
724 690
725 691
726#define __wait_event_interruptible_lock_irq(wq, condition, \ 692#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
727 lock, ret, cmd) \ 693 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
728do { \ 694 spin_unlock_irq(&lock); \
729 DEFINE_WAIT(__wait); \ 695 cmd; \
730 \ 696 schedule(); \
731 for (;;) { \ 697 spin_lock_irq(&lock))
732 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
733 if (condition) \
734 break; \
735 if (signal_pending(current)) { \
736 ret = -ERESTARTSYS; \
737 break; \
738 } \
739 spin_unlock_irq(&lock); \
740 cmd; \
741 schedule(); \
742 spin_lock_irq(&lock); \
743 } \
744 finish_wait(&wq, &__wait); \
745} while (0)
746 698
747/** 699/**
748 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. 700 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
@@ -772,10 +724,9 @@ do { \
772#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \ 724#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
773({ \ 725({ \
774 int __ret = 0; \ 726 int __ret = 0; \
775 \
776 if (!(condition)) \ 727 if (!(condition)) \
777 __wait_event_interruptible_lock_irq(wq, condition, \ 728 __ret = __wait_event_interruptible_lock_irq(wq, \
778 lock, __ret, cmd); \ 729 condition, lock, cmd); \
779 __ret; \ 730 __ret; \
780}) 731})
781 732
@@ -804,39 +755,24 @@ do { \
804#define wait_event_interruptible_lock_irq(wq, condition, lock) \ 755#define wait_event_interruptible_lock_irq(wq, condition, lock) \
805({ \ 756({ \
806 int __ret = 0; \ 757 int __ret = 0; \
807 \
808 if (!(condition)) \ 758 if (!(condition)) \
809 __wait_event_interruptible_lock_irq(wq, condition, \ 759 __ret = __wait_event_interruptible_lock_irq(wq, \
810 lock, __ret, ); \ 760 condition, lock,); \
811 __ret; \ 761 __ret; \
812}) 762})
813 763
814#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ 764#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
815 lock, ret) \ 765 lock, timeout) \
816do { \ 766 ___wait_event(wq, ___wait_cond_timeout(condition), \
817 DEFINE_WAIT(__wait); \ 767 TASK_INTERRUPTIBLE, 0, timeout, \
818 \ 768 spin_unlock_irq(&lock); \
819 for (;;) { \ 769 __ret = schedule_timeout(__ret); \
820 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ 770 spin_lock_irq(&lock));
821 if (condition) \
822 break; \
823 if (signal_pending(current)) { \
824 ret = -ERESTARTSYS; \
825 break; \
826 } \
827 spin_unlock_irq(&lock); \
828 ret = schedule_timeout(ret); \
829 spin_lock_irq(&lock); \
830 if (!ret) \
831 break; \
832 } \
833 finish_wait(&wq, &__wait); \
834} while (0)
835 771
836/** 772/**
837 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses. 773 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
838 * The condition is checked under the lock. This is expected 774 * true or a timeout elapses. The condition is checked under
839 * to be called with the lock taken. 775 * the lock. This is expected to be called with the lock taken.
840 * @wq: the waitqueue to wait on 776 * @wq: the waitqueue to wait on
841 * @condition: a C expression for the event to wait for 777 * @condition: a C expression for the event to wait for
842 * @lock: a locked spinlock_t, which will be released before schedule() 778 * @lock: a locked spinlock_t, which will be released before schedule()
@@ -860,11 +796,10 @@ do { \
860#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ 796#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
861 timeout) \ 797 timeout) \
862({ \ 798({ \
863 int __ret = timeout; \ 799 long __ret = timeout; \
864 \ 800 if (!___wait_cond_timeout(condition)) \
865 if (!(condition)) \ 801 __ret = __wait_event_interruptible_lock_irq_timeout( \
866 __wait_event_interruptible_lock_irq_timeout( \ 802 wq, condition, lock, timeout); \
867 wq, condition, lock, __ret); \
868 __ret; \ 803 __ret; \
869}) 804})
870 805
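A usage sketch (not from the patch): now that the macro evaluates to a long, callers can distinguish remaining jiffies, a plain timeout and -ERESTARTSYS without truncation. The device structure and condition are hypothetical.

static int example_wait_ready(struct example_dev *dev)
{
        long ret;

        spin_lock_irq(&dev->lock);
        ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->ready,
                                                        dev->lock, HZ);
        spin_unlock_irq(&dev->lock);

        if (ret < 0)
                return ret;             /* interrupted by a signal */
        if (ret == 0)
                return -ETIMEDOUT;      /* still not ready after one second */
        return 0;                       /* ready; ret holds the jiffies left over */
}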
@@ -875,20 +810,18 @@ do { \
875 * We plan to remove these interfaces. 810 * We plan to remove these interfaces.
876 */ 811 */
877extern void sleep_on(wait_queue_head_t *q); 812extern void sleep_on(wait_queue_head_t *q);
878extern long sleep_on_timeout(wait_queue_head_t *q, 813extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
879 signed long timeout);
880extern void interruptible_sleep_on(wait_queue_head_t *q); 814extern void interruptible_sleep_on(wait_queue_head_t *q);
881extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, 815extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
882 signed long timeout);
883 816
884/* 817/*
885 * Waitqueues which are removed from the waitqueue_head at wakeup time 818 * Waitqueues which are removed from the waitqueue_head at wakeup time
886 */ 819 */
887void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); 820void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
888void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); 821void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
822long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
889void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); 823void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
890void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, 824void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
891 unsigned int mode, void *key);
892int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 825int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
893int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 826int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
894 827
@@ -934,8 +867,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
934 * One uses wait_on_bit() where one is waiting for the bit to clear, 867 * One uses wait_on_bit() where one is waiting for the bit to clear,
935 * but has no intention of setting it. 868 * but has no intention of setting it.
936 */ 869 */
937static inline int wait_on_bit(void *word, int bit, 870static inline int
938 int (*action)(void *), unsigned mode) 871wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
939{ 872{
940 if (!test_bit(bit, word)) 873 if (!test_bit(bit, word))
941 return 0; 874 return 0;
@@ -958,8 +891,8 @@ static inline int wait_on_bit(void *word, int bit,
958 * One uses wait_on_bit_lock() where one is waiting for the bit to 891 * One uses wait_on_bit_lock() where one is waiting for the bit to
959 * clear with the intention of setting it, and when done, clearing it. 892 * clear with the intention of setting it, and when done, clearing it.
960 */ 893 */
961static inline int wait_on_bit_lock(void *word, int bit, 894static inline int
962 int (*action)(void *), unsigned mode) 895wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
963{ 896{
964 if (!test_and_set_bit(bit, word)) 897 if (!test_and_set_bit(bit, word))
965 return 0; 898 return 0;
@@ -983,5 +916,5 @@ int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
983 return 0; 916 return 0;
984 return out_of_line_wait_on_atomic_t(val, action, mode); 917 return out_of_line_wait_on_atomic_t(val, action, mode);
985} 918}
986 919
987#endif 920#endif /* _LINUX_WAIT_H */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 021b8a319b9e..fc0e4320aa6d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -97,7 +97,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
97int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason); 97int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
98int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr, 98int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
99 enum wb_reason reason); 99 enum wb_reason reason);
100void sync_inodes_sb(struct super_block *); 100void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
101void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); 101void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
102void inode_wait_for_writeback(struct inode *inode); 102void inode_wait_for_writeback(struct inode *inode);
103 103
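A sketch of the adjusted call (not from the patch): callers now pass a jiffies timestamp so writeback can ignore inodes dirtied after the sync began, roughly the way the sys_sync() path iterates super blocks.

static void example_sync_one_sb(struct super_block *sb, void *arg)
{
        unsigned long *start_time = arg;        /* jiffies captured when the sync started */

        if (!(sb->s_flags & MS_RDONLY))
                sync_inodes_sb(sb, *start_time);
}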