aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h168
-rw-r--r--include/linux/amba/bus.h10
-rw-r--r--include/linux/ath9k_platform.h3
-rw-r--r--include/linux/atmel-mci.h2
-rw-r--r--include/linux/audit.h11
-rw-r--r--include/linux/bcma/bcma.h2
-rw-r--r--include/linux/bcma/bcma_driver_mips.h4
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bitmap.h36
-rw-r--r--include/linux/bitops.h7
-rw-r--r--include/linux/blk-mq.h17
-rw-r--r--include/linux/blkdev.h10
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/bpf.h11
-rw-r--r--include/linux/buffer_head.h47
-rw-r--r--include/linux/can/dev.h9
-rw-r--r--include/linux/cgroup.h34
-rw-r--r--include/linux/clk-provider.h1
-rw-r--r--include/linux/clk/ti.h15
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cma.h8
-rw-r--r--include/linux/compaction.h10
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/compiler-gcc4.h1
-rw-r--r--include/linux/compiler-gcc5.h1
-rw-r--r--include/linux/cpufreq-dt.h22
-rw-r--r--include/linux/cpufreq.h54
-rw-r--r--include/linux/cpuidle.h4
-rw-r--r--include/linux/cpuset.h37
-rw-r--r--include/linux/crash_dump.h15
-rw-r--r--include/linux/crypto.h1112
-rw-r--r--include/linux/dcache.h9
-rw-r--r--include/linux/debugfs.h7
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/dmaengine.h3
-rw-r--r--include/linux/dmar.h50
-rw-r--r--include/linux/edac.h4
-rw-r--r--include/linux/eeprom_93cx6.h4
-rw-r--r--include/linux/efi.h23
-rw-r--r--include/linux/elf.h5
-rw-r--r--include/linux/etherdevice.h12
-rw-r--r--include/linux/ethtool.h42
-rw-r--r--include/linux/f2fs_fs.h27
-rw-r--r--include/linux/fault-inject.h17
-rw-r--r--include/linux/fence.h4
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/freezer.h50
-rw-r--r--include/linux/fs.h93
-rw-r--r--include/linux/fsnotify_backend.h31
-rw-r--r--include/linux/ftrace.h16
-rw-r--r--include/linux/gfp.h11
-rw-r--r--include/linux/gpio/consumer.h7
-rw-r--r--include/linux/gpio_keys.h3
-rw-r--r--include/linux/hash.h35
-rw-r--r--include/linux/hid.h43
-rw-r--r--include/linux/hugetlb.h49
-rw-r--r--include/linux/hugetlb_cgroup.h1
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/i2c/pmbus.h4
-rw-r--r--include/linux/ieee80211.h71
-rw-r--r--include/linux/ieee802154.h242
-rw-r--r--include/linux/if_bridge.h31
-rw-r--r--include/linux/if_vlan.h107
-rw-r--r--include/linux/iio/events.h2
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/init_task.h12
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/iommu.h25
-rw-r--r--include/linux/ipc_namespace.h20
-rw-r--r--include/linux/ipmi.h6
-rw-r--r--include/linux/ipmi_smi.h10
-rw-r--r--include/linux/ipv6.h11
-rw-r--r--include/linux/irq.h65
-rw-r--r--include/linux/irqchip/mips-gic.h249
-rw-r--r--include/linux/irqdomain.h101
-rw-r--r--include/linux/irqhandler.h14
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kern_levels.h13
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/kernel_stat.h5
-rw-r--r--include/linux/kgdb.h2
-rw-r--r--include/linux/khugepaged.h17
-rw-r--r--include/linux/kmemleak.h2
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kvm_host.h3
-rw-r--r--include/linux/leds.h19
-rw-r--r--include/linux/libata.h10
-rw-r--r--include/linux/list.h34
-rw-r--r--include/linux/lockd/debug.h6
-rw-r--r--include/linux/mailbox_client.h49
-rw-r--r--include/linux/mailbox_controller.h133
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/mbus.h1
-rw-r--r--include/linux/memcontrol.h114
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h1
-rw-r--r--include/linux/mfd/arizona/core.h1
-rw-r--r--include/linux/mfd/arizona/registers.h50
-rw-r--r--include/linux/mfd/atmel-hlcdc.h85
-rw-r--r--include/linux/mfd/axp20x.h59
-rw-r--r--include/linux/mfd/core.h7
-rw-r--r--include/linux/mfd/davinci_voicecodec.h7
-rw-r--r--include/linux/mfd/dln2.h103
-rw-r--r--include/linux/mfd/max77686.h7
-rw-r--r--include/linux/mfd/max77693-private.h8
-rw-r--r--include/linux/mfd/rtsx_pci.h37
-rw-r--r--include/linux/mfd/samsung/core.h2
-rw-r--r--include/linux/mfd/samsung/s2mps13.h186
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h39
-rw-r--r--include/linux/mfd/tc3589x.h8
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mlx4/cmd.h31
-rw-r--r--include/linux/mlx4/device.h127
-rw-r--r--include/linux/mlx4/qp.h4
-rw-r--r--include/linux/mlx5/device.h8
-rw-r--r--include/linux/mlx5/driver.h8
-rw-r--r--include/linux/mm.h61
-rw-r--r--include/linux/mm_types.h21
-rw-r--r--include/linux/mmc/card.h23
-rw-r--r--include/linux/mmc/core.h3
-rw-r--r--include/linux/mmc/dw_mmc.h7
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/mmc/mmc.h3
-rw-r--r--include/linux/mmc/sdhci.h18
-rw-r--r--include/linux/mmc/sdio_func.h2
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/mmzone.h23
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/msi.h187
-rw-r--r--include/linux/mtd/spi-nor.h21
-rw-r--r--include/linux/netdev_features.h9
-rw-r--r--include/linux/netdevice.h134
-rw-r--r--include/linux/nfs4.h2
-rw-r--r--include/linux/nfs_fs.h4
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/nfs_xdr.h25
-rw-r--r--include/linux/nl802154.h4
-rw-r--r--include/linux/of.h251
-rw-r--r--include/linux/of_address.h4
-rw-r--r--include/linux/of_pci.h12
-rw-r--r--include/linux/of_pdt.h3
-rw-r--r--include/linux/of_platform.h6
-rw-r--r--include/linux/of_reserved_mem.h9
-rw-r--r--include/linux/omap-gpmc.h199
-rw-r--r--include/linux/omap-mailbox.h16
-rw-r--r--include/linux/oom.h14
-rw-r--r--include/linux/page-debug-flags.h32
-rw-r--r--include/linux/page-isolation.h8
-rw-r--r--include/linux/page_cgroup.h105
-rw-r--r--include/linux/page_counter.h51
-rw-r--r--include/linux/page_ext.h84
-rw-r--r--include/linux/page_owner.h38
-rw-r--r--include/linux/pci-acpi.h7
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_hotplug.h1
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/percpu-defs.h2
-rw-r--r--include/linux/percpu-refcount.h59
-rw-r--r--include/linux/perf_event.h37
-rw-r--r--include/linux/phy.h26
-rw-r--r--include/linux/pl320-ipc.h (renamed from include/linux/mailbox.h)0
-rw-r--r--include/linux/platform_data/asoc-s3c.h1
-rw-r--r--include/linux/platform_data/bcmgenet.h18
-rw-r--r--include/linux/platform_data/dma-imx.h1
-rw-r--r--include/linux/platform_data/hsmmc-omap.h90
-rw-r--r--include/linux/platform_data/lp855x.h2
-rw-r--r--include/linux/platform_data/mmc-atmel-mci.h22
-rw-r--r--include/linux/platform_data/mmc-omap.h27
-rw-r--r--include/linux/platform_data/pxa_sdhci.h5
-rw-r--r--include/linux/platform_data/serial-omap.h3
-rw-r--r--include/linux/platform_data/st21nfca.h1
-rw-r--r--include/linux/platform_data/st21nfcb.h1
-rw-r--r--include/linux/plist.h10
-rw-r--r--include/linux/pm.h19
-rw-r--r--include/linux/pm_clock.h8
-rw-r--r--include/linux/pm_domain.h25
-rw-r--r--include/linux/pm_opp.h12
-rw-r--r--include/linux/pm_qos.h43
-rw-r--r--include/linux/pm_runtime.h27
-rw-r--r--include/linux/pnfs_osd_xdr.h2
-rw-r--r--include/linux/power/charger-manager.h3
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/printk.h1
-rw-r--r--include/linux/property.h143
-rw-r--r--include/linux/pstore_ram.h4
-rw-r--r--include/linux/ptrace.h2
-rw-r--r--include/linux/pxa168_eth.h3
-rw-r--r--include/linux/pxa2xx_ssp.h20
-rw-r--r--include/linux/quota.h5
-rw-r--r--include/linux/quotaops.h8
-rw-r--r--include/linux/ratelimit.h12
-rw-r--r--include/linux/rculist.h17
-rw-r--r--include/linux/rcupdate.h34
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/rcutree.h6
-rw-r--r--include/linux/regmap.h7
-rw-r--r--include/linux/regulator/consumer.h8
-rw-r--r--include/linux/regulator/driver.h8
-rw-r--r--include/linux/regulator/of_regulator.h9
-rw-r--r--include/linux/res_counter.h223
-rw-r--r--include/linux/reset-controller.h2
-rw-r--r--include/linux/reset.h7
-rw-r--r--include/linux/rhashtable.h15
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rtc.h21
-rw-r--r--include/linux/rtnetlink.h14
-rw-r--r--include/linux/sched.h102
-rw-r--r--include/linux/seq_file.h15
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/skbuff.h198
-rw-r--r--include/linux/slab.h6
-rw-r--r--include/linux/socket.h26
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/stacktrace.h5
-rw-r--r--include/linux/string.h5
-rw-r--r--include/linux/sunrpc/auth.h2
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/debug.h64
-rw-r--r--include/linux/sunrpc/metrics.h3
-rw-r--r--include/linux/sunrpc/sched.h8
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/sunrpc/xprtsock.h59
-rw-r--r--include/linux/swap.h8
-rw-r--r--include/linux/swap_cgroup.h42
-rw-r--r--include/linux/syscalls.h11
-rw-r--r--include/linux/syslog.h9
-rw-r--r--include/linux/tcp.h8
-rw-r--r--include/linux/thermal.h4
-rw-r--r--include/linux/time.h17
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h51
-rw-r--r--include/linux/uio.h7
-rw-r--r--include/linux/uio_driver.h12
-rw-r--r--include/linux/uprobes.h14
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/hcd.h7
-rw-r--r--include/linux/usb/usbnet.h4
-rw-r--r--include/linux/vexpress.h19
-rw-r--r--include/linux/virtio.h12
-rw-r--r--include/linux/virtio_byteorder.h59
-rw-r--r--include/linux/virtio_config.h103
-rw-r--r--include/linux/virtio_scsi.h162
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmw_vmci_api.h5
-rw-r--r--include/linux/wait.h80
-rw-r--r--include/linux/watchdog.h9
250 files changed, 6309 insertions, 1672 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b7926bb9b444..6bff83b1f298 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -28,6 +28,7 @@
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/ioport.h> /* for struct resource */ 29#include <linux/ioport.h> /* for struct resource */
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/property.h>
31 32
32#ifndef _LINUX 33#ifndef _LINUX
33#define _LINUX 34#define _LINUX
@@ -123,6 +124,10 @@ int acpi_numa_init (void);
123 124
124int acpi_table_init (void); 125int acpi_table_init (void);
125int acpi_table_parse(char *id, acpi_tbl_table_handler handler); 126int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
127int __init acpi_parse_entries(char *id, unsigned long table_size,
128 acpi_tbl_entry_handler handler,
129 struct acpi_table_header *table_header,
130 int entry_id, unsigned int max_entries);
126int __init acpi_table_parse_entries(char *id, unsigned long table_size, 131int __init acpi_table_parse_entries(char *id, unsigned long table_size,
127 int entry_id, 132 int entry_id,
128 acpi_tbl_entry_handler handler, 133 acpi_tbl_entry_handler handler,
@@ -423,15 +428,13 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
423const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 428const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
424 const struct device *dev); 429 const struct device *dev);
425 430
426static inline bool acpi_driver_match_device(struct device *dev, 431extern bool acpi_driver_match_device(struct device *dev,
427 const struct device_driver *drv) 432 const struct device_driver *drv);
428{
429 return !!acpi_match_device(drv->acpi_match_table, dev);
430}
431
432int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); 433int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
433int acpi_device_modalias(struct device *, char *, int); 434int acpi_device_modalias(struct device *, char *, int);
435void acpi_walk_dep_device_list(acpi_handle handle);
434 436
437struct platform_device *acpi_create_platform_device(struct acpi_device *);
435#define ACPI_PTR(_ptr) (_ptr) 438#define ACPI_PTR(_ptr) (_ptr)
436 439
437#else /* !CONFIG_ACPI */ 440#else /* !CONFIG_ACPI */
@@ -442,6 +445,23 @@ int acpi_device_modalias(struct device *, char *, int);
442#define ACPI_COMPANION_SET(dev, adev) do { } while (0) 445#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
443#define ACPI_HANDLE(dev) (NULL) 446#define ACPI_HANDLE(dev) (NULL)
444 447
448struct fwnode_handle;
449
450static inline bool is_acpi_node(struct fwnode_handle *fwnode)
451{
452 return false;
453}
454
455static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
456{
457 return NULL;
458}
459
460static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
461{
462 return NULL;
463}
464
445static inline const char *acpi_dev_name(struct acpi_device *adev) 465static inline const char *acpi_dev_name(struct acpi_device *adev)
446{ 466{
447 return NULL; 467 return NULL;
@@ -552,16 +572,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
552#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) 572#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
553#endif 573#endif
554 574
555#if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) 575#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
556int acpi_dev_runtime_suspend(struct device *dev); 576int acpi_dev_runtime_suspend(struct device *dev);
557int acpi_dev_runtime_resume(struct device *dev); 577int acpi_dev_runtime_resume(struct device *dev);
558int acpi_subsys_runtime_suspend(struct device *dev); 578int acpi_subsys_runtime_suspend(struct device *dev);
559int acpi_subsys_runtime_resume(struct device *dev); 579int acpi_subsys_runtime_resume(struct device *dev);
580struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
581int acpi_dev_pm_attach(struct device *dev, bool power_on);
560#else 582#else
561static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } 583static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
562static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } 584static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
563static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } 585static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
564static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } 586static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
587static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
588{
589 return NULL;
590}
591static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
592{
593 return -ENODEV;
594}
565#endif 595#endif
566 596
567#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) 597#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -584,20 +614,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
584static inline int acpi_subsys_freeze(struct device *dev) { return 0; } 614static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
585#endif 615#endif
586 616
587#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
588struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
589int acpi_dev_pm_attach(struct device *dev, bool power_on);
590#else
591static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
592{
593 return NULL;
594}
595static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
596{
597 return -ENODEV;
598}
599#endif
600
601#ifdef CONFIG_ACPI 617#ifdef CONFIG_ACPI
602__printf(3, 4) 618__printf(3, 4)
603void acpi_handle_printk(const char *level, acpi_handle handle, 619void acpi_handle_printk(const char *level, acpi_handle handle,
@@ -658,4 +674,114 @@ do { \
658#endif 674#endif
659#endif 675#endif
660 676
677struct acpi_gpio_params {
678 unsigned int crs_entry_index;
679 unsigned int line_index;
680 bool active_low;
681};
682
683struct acpi_gpio_mapping {
684 const char *name;
685 const struct acpi_gpio_params *data;
686 unsigned int size;
687};
688
689#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
690int acpi_dev_add_driver_gpios(struct acpi_device *adev,
691 const struct acpi_gpio_mapping *gpios);
692
693static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
694{
695 if (adev)
696 adev->driver_gpios = NULL;
697}
698#else
699static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
700 const struct acpi_gpio_mapping *gpios)
701{
702 return -ENXIO;
703}
704static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
705#endif
706
707/* Device properties */
708
709#define MAX_ACPI_REFERENCE_ARGS 8
710struct acpi_reference_args {
711 struct acpi_device *adev;
712 size_t nargs;
713 u64 args[MAX_ACPI_REFERENCE_ARGS];
714};
715
716#ifdef CONFIG_ACPI
717int acpi_dev_get_property(struct acpi_device *adev, const char *name,
718 acpi_object_type type, const union acpi_object **obj);
719int acpi_dev_get_property_array(struct acpi_device *adev, const char *name,
720 acpi_object_type type,
721 const union acpi_object **obj);
722int acpi_dev_get_property_reference(struct acpi_device *adev,
723 const char *name, size_t index,
724 struct acpi_reference_args *args);
725
726int acpi_dev_prop_get(struct acpi_device *adev, const char *propname,
727 void **valptr);
728int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
729 enum dev_prop_type proptype, void *val);
730int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
731 enum dev_prop_type proptype, void *val, size_t nval);
732
733struct acpi_device *acpi_get_next_child(struct device *dev,
734 struct acpi_device *child);
735#else
736static inline int acpi_dev_get_property(struct acpi_device *adev,
737 const char *name, acpi_object_type type,
738 const union acpi_object **obj)
739{
740 return -ENXIO;
741}
742static inline int acpi_dev_get_property_array(struct acpi_device *adev,
743 const char *name,
744 acpi_object_type type,
745 const union acpi_object **obj)
746{
747 return -ENXIO;
748}
749static inline int acpi_dev_get_property_reference(struct acpi_device *adev,
750 const char *name, const char *cells_name,
751 size_t index, struct acpi_reference_args *args)
752{
753 return -ENXIO;
754}
755
756static inline int acpi_dev_prop_get(struct acpi_device *adev,
757 const char *propname,
758 void **valptr)
759{
760 return -ENXIO;
761}
762
763static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
764 const char *propname,
765 enum dev_prop_type proptype,
766 void *val)
767{
768 return -ENXIO;
769}
770
771static inline int acpi_dev_prop_read(struct acpi_device *adev,
772 const char *propname,
773 enum dev_prop_type proptype,
774 void *val, size_t nval)
775{
776 return -ENXIO;
777}
778
779static inline struct acpi_device *acpi_get_next_child(struct device *dev,
780 struct acpi_device *child)
781{
782 return NULL;
783}
784
785#endif
786
661#endif /*_LINUX_ACPI_H*/ 787#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c324f5700d1a..ac02f9bd63dc 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -97,6 +97,16 @@ void amba_release_regions(struct amba_device *);
97#define amba_pclk_disable(d) \ 97#define amba_pclk_disable(d) \
98 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 98 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
99 99
100static inline int amba_pclk_prepare(struct amba_device *dev)
101{
102 return clk_prepare(dev->pclk);
103}
104
105static inline void amba_pclk_unprepare(struct amba_device *dev)
106{
107 clk_unprepare(dev->pclk);
108}
109
100/* Some drivers don't use the struct amba_device */ 110/* Some drivers don't use the struct amba_device */
101#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) 111#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
102#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) 112#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index a495a959e8a7..33eb274cd0e6 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,8 +31,11 @@ struct ath9k_platform_data {
31 u32 gpio_mask; 31 u32 gpio_mask;
32 u32 gpio_val; 32 u32 gpio_val;
33 33
34 bool endian_check;
34 bool is_clk_25mhz; 35 bool is_clk_25mhz;
35 bool tx_gain_buffalo; 36 bool tx_gain_buffalo;
37 bool disable_2ghz;
38 bool disable_5ghz;
36 39
37 int (*get_mac_revision)(void); 40 int (*get_mac_revision)(void);
38 int (*external_reset)(void); 41 int (*external_reset)(void);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 91b77f8d495d..9177947bf032 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -11,6 +11,7 @@
11 * @detect_pin: GPIO pin wired to the card detect switch 11 * @detect_pin: GPIO pin wired to the card detect switch
12 * @wp_pin: GPIO pin wired to the write protect sensor 12 * @wp_pin: GPIO pin wired to the write protect sensor
13 * @detect_is_active_high: The state of the detect pin when it is active 13 * @detect_is_active_high: The state of the detect pin when it is active
14 * @non_removable: The slot is not removable, only detect once
14 * 15 *
15 * If a given slot is not present on the board, @bus_width should be 16 * If a given slot is not present on the board, @bus_width should be
16 * set to 0. The other fields are ignored in this case. 17 * set to 0. The other fields are ignored in this case.
@@ -26,6 +27,7 @@ struct mci_slot_pdata {
26 int detect_pin; 27 int detect_pin;
27 int wp_pin; 28 int wp_pin;
28 bool detect_is_active_high; 29 bool detect_is_active_high;
30 bool non_removable;
29}; 31};
30 32
31/** 33/**
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 36dffeccebdb..0c04917c2f12 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -90,7 +90,7 @@ extern unsigned compat_dir_class[];
90extern unsigned compat_chattr_class[]; 90extern unsigned compat_chattr_class[];
91extern unsigned compat_signal_class[]; 91extern unsigned compat_signal_class[];
92 92
93extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall); 93extern int audit_classify_compat_syscall(int abi, unsigned syscall);
94 94
95/* audit_names->type values */ 95/* audit_names->type values */
96#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ 96#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
@@ -130,6 +130,7 @@ extern void audit_putname(struct filename *name);
130#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ 130#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
131extern void __audit_inode(struct filename *name, const struct dentry *dentry, 131extern void __audit_inode(struct filename *name, const struct dentry *dentry,
132 unsigned int flags); 132 unsigned int flags);
133extern void __audit_file(const struct file *);
133extern void __audit_inode_child(const struct inode *parent, 134extern void __audit_inode_child(const struct inode *parent,
134 const struct dentry *dentry, 135 const struct dentry *dentry,
135 const unsigned char type); 136 const unsigned char type);
@@ -183,6 +184,11 @@ static inline void audit_inode(struct filename *name,
183 __audit_inode(name, dentry, flags); 184 __audit_inode(name, dentry, flags);
184 } 185 }
185} 186}
187static inline void audit_file(struct file *file)
188{
189 if (unlikely(!audit_dummy_context()))
190 __audit_file(file);
191}
186static inline void audit_inode_parent_hidden(struct filename *name, 192static inline void audit_inode_parent_hidden(struct filename *name,
187 const struct dentry *dentry) 193 const struct dentry *dentry)
188{ 194{
@@ -357,6 +363,9 @@ static inline void audit_inode(struct filename *name,
357 const struct dentry *dentry, 363 const struct dentry *dentry,
358 unsigned int parent) 364 unsigned int parent)
359{ } 365{ }
366static inline void audit_file(struct file *file)
367{
368}
360static inline void audit_inode_parent_hidden(struct filename *name, 369static inline void audit_inode_parent_hidden(struct filename *name,
361 const struct dentry *dentry) 370 const struct dentry *dentry)
362{ } 371{ }
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 729f48e6b20b..eb1c6a47b67f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
447#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ 447#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */
448extern u32 bcma_core_dma_translation(struct bcma_device *core); 448extern u32 bcma_core_dma_translation(struct bcma_device *core);
449 449
450extern unsigned int bcma_core_irq(struct bcma_device *core, int num);
451
450#endif /* LINUX_BCMA_H_ */ 452#endif /* LINUX_BCMA_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index fb61f3fb4ddb..0b3b32aeeb8a 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -43,12 +43,12 @@ struct bcma_drv_mips {
43extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); 43extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
44extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); 44extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
45 45
46extern unsigned int bcma_core_irq(struct bcma_device *core); 46extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
47#else 47#else
48static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } 48static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
49static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } 49static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
50 50
51static inline unsigned int bcma_core_irq(struct bcma_device *core) 51static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
52{ 52{
53 return 0; 53 return 0;
54} 54}
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61f29e5ea840..576e4639ca60 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -53,6 +53,10 @@ struct linux_binprm {
53#define BINPRM_FLAGS_EXECFD_BIT 1 53#define BINPRM_FLAGS_EXECFD_BIT 1
54#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) 54#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
55 55
56/* filename of the binary will be inaccessible after exec */
57#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
58#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
59
56/* Function parameter for binfmt->coredump */ 60/* Function parameter for binfmt->coredump */
57struct coredump_params { 61struct coredump_params {
58 const siginfo_t *siginfo; 62 const siginfo_t *siginfo;
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e1c8d080c427..34e020c23644 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@
45 * bitmap_set(dst, pos, nbits) Set specified bit area 45 * bitmap_set(dst, pos, nbits) Set specified bit area
46 * bitmap_clear(dst, pos, nbits) Clear specified bit area 46 * bitmap_clear(dst, pos, nbits) Clear specified bit area
47 * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area 47 * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
48 * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
48 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n 49 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
49 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n 50 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
50 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) 51 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -114,11 +115,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
114 115
115extern void bitmap_set(unsigned long *map, unsigned int start, int len); 116extern void bitmap_set(unsigned long *map, unsigned int start, int len);
116extern void bitmap_clear(unsigned long *map, unsigned int start, int len); 117extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
117extern unsigned long bitmap_find_next_zero_area(unsigned long *map, 118
118 unsigned long size, 119extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
119 unsigned long start, 120 unsigned long size,
120 unsigned int nr, 121 unsigned long start,
121 unsigned long align_mask); 122 unsigned int nr,
123 unsigned long align_mask,
124 unsigned long align_offset);
125
126/**
127 * bitmap_find_next_zero_area - find a contiguous aligned zero area
128 * @map: The address to base the search on
129 * @size: The bitmap size in bits
130 * @start: The bitnumber to start searching at
131 * @nr: The number of zeroed bits we're looking for
132 * @align_mask: Alignment mask for zero area
133 *
134 * The @align_mask should be one less than a power of 2; the effect is that
135 * the bit offset of all zero areas this function finds is multiples of that
136 * power of 2. A @align_mask of 0 means no alignment is required.
137 */
138static inline unsigned long
139bitmap_find_next_zero_area(unsigned long *map,
140 unsigned long size,
141 unsigned long start,
142 unsigned int nr,
143 unsigned long align_mask)
144{
145 return bitmap_find_next_zero_area_off(map, size, start, nr,
146 align_mask, 0);
147}
122 148
123extern int bitmap_scnprintf(char *buf, unsigned int len, 149extern int bitmap_scnprintf(char *buf, unsigned int len,
124 const unsigned long *src, int nbits); 150 const unsigned long *src, int nbits);
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index be5fd38bd5a0..5d858e02997f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -18,8 +18,11 @@
18 * position @h. For example 18 * position @h. For example
19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
20 */ 20 */
21#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) 21#define GENMASK(h, l) \
22#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) 22 (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
23
24#define GENMASK_ULL(h, l) \
25 (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
23 26
24extern unsigned int __sw_hweight8(unsigned int w); 27extern unsigned int __sw_hweight8(unsigned int w);
25extern unsigned int __sw_hweight16(unsigned int w); 28extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c9be1589415a..15f7034aa377 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -167,6 +167,23 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
167 gfp_t gfp, bool reserved); 167 gfp_t gfp, bool reserved);
168struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); 168struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
169 169
170enum {
171 BLK_MQ_UNIQUE_TAG_BITS = 16,
172 BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
173};
174
175u32 blk_mq_unique_tag(struct request *rq);
176
177static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
178{
179 return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
180}
181
182static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
183{
184 return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
185}
186
170struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); 187struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
171struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); 188struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
172 189
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0207a78a8d82..0495e3854247 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -398,7 +398,7 @@ struct request_queue {
398 */ 398 */
399 struct kobject mq_kobj; 399 struct kobject mq_kobj;
400 400
401#ifdef CONFIG_PM_RUNTIME 401#ifdef CONFIG_PM
402 struct device *dev; 402 struct device *dev;
403 int rpm_status; 403 int rpm_status;
404 unsigned int nr_pending; 404 unsigned int nr_pending;
@@ -1057,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *);
1057/* 1057/*
1058 * block layer runtime pm functions 1058 * block layer runtime pm functions
1059 */ 1059 */
1060#ifdef CONFIG_PM_RUNTIME 1060#ifdef CONFIG_PM
1061extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); 1061extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1062extern int blk_pre_runtime_suspend(struct request_queue *q); 1062extern int blk_pre_runtime_suspend(struct request_queue *q);
1063extern void blk_post_runtime_suspend(struct request_queue *q, int err); 1063extern void blk_post_runtime_suspend(struct request_queue *q, int err);
@@ -1136,8 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1136/* 1136/*
1137 * tag stuff 1137 * tag stuff
1138 */ 1138 */
1139#define blk_rq_tagged(rq) \
1140 ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
1141extern int blk_queue_start_tag(struct request_queue *, struct request *); 1139extern int blk_queue_start_tag(struct request_queue *, struct request *);
1142extern struct request *blk_queue_find_tag(struct request_queue *, int); 1140extern struct request *blk_queue_find_tag(struct request_queue *, int);
1143extern void blk_queue_end_tag(struct request_queue *, struct request *); 1141extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1583,13 +1581,13 @@ static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1583 struct request *r1, 1581 struct request *r1,
1584 struct request *r2) 1582 struct request *r2)
1585{ 1583{
1586 return 0; 1584 return true;
1587} 1585}
1588static inline bool blk_integrity_merge_bio(struct request_queue *rq, 1586static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1589 struct request *r, 1587 struct request *r,
1590 struct bio *b) 1588 struct bio *b)
1591{ 1589{
1592 return 0; 1590 return true;
1593} 1591}
1594static inline bool blk_integrity_is_initialized(struct gendisk *g) 1592static inline bool blk_integrity_is_initialized(struct gendisk *g)
1595{ 1593{
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e2bd4c95b66..0995c2de8162 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); 46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
47 47
48extern unsigned long free_all_bootmem(void); 48extern unsigned long free_all_bootmem(void);
49extern void reset_node_managed_pages(pg_data_t *pgdat);
49extern void reset_all_zones_managed_pages(void); 50extern void reset_all_zones_managed_pages(void);
50 51
51extern void free_bootmem_node(pg_data_t *pgdat, 52extern void free_bootmem_node(pg_data_t *pgdat,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3cf91754a957..bbfceb756452 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -22,7 +22,7 @@ struct bpf_map_ops {
22 22
23 /* funcs callable from userspace and from eBPF programs */ 23 /* funcs callable from userspace and from eBPF programs */
24 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 24 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
25 int (*map_update_elem)(struct bpf_map *map, void *key, void *value); 25 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
26 int (*map_delete_elem)(struct bpf_map *map, void *key); 26 int (*map_delete_elem)(struct bpf_map *map, void *key);
27}; 27};
28 28
@@ -128,9 +128,18 @@ struct bpf_prog_aux {
128 struct work_struct work; 128 struct work_struct work;
129}; 129};
130 130
131#ifdef CONFIG_BPF_SYSCALL
131void bpf_prog_put(struct bpf_prog *prog); 132void bpf_prog_put(struct bpf_prog *prog);
133#else
134static inline void bpf_prog_put(struct bpf_prog *prog) {}
135#endif
132struct bpf_prog *bpf_prog_get(u32 ufd); 136struct bpf_prog *bpf_prog_get(u32 ufd);
133/* verify correctness of eBPF program */ 137/* verify correctness of eBPF program */
134int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); 138int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
135 139
140/* verifier prototypes for helper functions called from eBPF programs */
141extern struct bpf_func_proto bpf_map_lookup_elem_proto;
142extern struct bpf_func_proto bpf_map_update_elem_proto;
143extern struct bpf_func_proto bpf_map_delete_elem_proto;
144
136#endif /* _LINUX_BPF_H */ 145#endif /* _LINUX_BPF_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 324329ceea1e..73b45225a7ca 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -175,12 +175,13 @@ void __wait_on_buffer(struct buffer_head *);
175wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); 175wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
176struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, 176struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
177 unsigned size); 177 unsigned size);
178struct buffer_head *__getblk(struct block_device *bdev, sector_t block, 178struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
179 unsigned size); 179 unsigned size, gfp_t gfp);
180void __brelse(struct buffer_head *); 180void __brelse(struct buffer_head *);
181void __bforget(struct buffer_head *); 181void __bforget(struct buffer_head *);
182void __breadahead(struct block_device *, sector_t block, unsigned int size); 182void __breadahead(struct block_device *, sector_t block, unsigned int size);
183struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size); 183struct buffer_head *__bread_gfp(struct block_device *,
184 sector_t block, unsigned size, gfp_t gfp);
184void invalidate_bh_lrus(void); 185void invalidate_bh_lrus(void);
185struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); 186struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
186void free_buffer_head(struct buffer_head * bh); 187void free_buffer_head(struct buffer_head * bh);
@@ -295,7 +296,13 @@ static inline void bforget(struct buffer_head *bh)
295static inline struct buffer_head * 296static inline struct buffer_head *
296sb_bread(struct super_block *sb, sector_t block) 297sb_bread(struct super_block *sb, sector_t block)
297{ 298{
298 return __bread(sb->s_bdev, block, sb->s_blocksize); 299 return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
300}
301
302static inline struct buffer_head *
303sb_bread_unmovable(struct super_block *sb, sector_t block)
304{
305 return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
299} 306}
300 307
301static inline void 308static inline void
@@ -307,7 +314,7 @@ sb_breadahead(struct super_block *sb, sector_t block)
307static inline struct buffer_head * 314static inline struct buffer_head *
308sb_getblk(struct super_block *sb, sector_t block) 315sb_getblk(struct super_block *sb, sector_t block)
309{ 316{
310 return __getblk(sb->s_bdev, block, sb->s_blocksize); 317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
311} 318}
312 319
313static inline struct buffer_head * 320static inline struct buffer_head *
@@ -344,6 +351,36 @@ static inline void lock_buffer(struct buffer_head *bh)
344 __lock_buffer(bh); 351 __lock_buffer(bh);
345} 352}
346 353
354static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
355 sector_t block,
356 unsigned size)
357{
358 return __getblk_gfp(bdev, block, size, 0);
359}
360
361static inline struct buffer_head *__getblk(struct block_device *bdev,
362 sector_t block,
363 unsigned size)
364{
365 return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
366}
367
368/**
369 * __bread() - reads a specified block and returns the bh
370 * @bdev: the block_device to read from
371 * @block: number of block
372 * @size: size (in bytes) to read
373 *
374 * Reads a specified block, and returns buffer head that contains it.
375 * The page cache is allocated from movable area so that it can be migrated.
376 * It returns NULL if the block was unreadable.
377 */
378static inline struct buffer_head *
379__bread(struct block_device *bdev, sector_t block, unsigned size)
380{
381 return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
382}
383
347extern int __set_page_dirty_buffers(struct page *page); 384extern int __set_page_dirty_buffers(struct page *page);
348 385
349#else /* CONFIG_BLOCK */ 386#else /* CONFIG_BLOCK */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 6992afc6ba7f..c05ff0f9f9a5 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -99,6 +99,12 @@ inval_skb:
99 return 1; 99 return 1;
100} 100}
101 101
102static inline bool can_is_canfd_skb(const struct sk_buff *skb)
103{
104 /* the CAN specific type of skb is identified by its data length */
105 return skb->len == CANFD_MTU;
106}
107
102/* get data length from can_dlc with sanitized can_dlc */ 108/* get data length from can_dlc with sanitized can_dlc */
103u8 can_dlc2len(u8 can_dlc); 109u8 can_dlc2len(u8 can_dlc);
104 110
@@ -121,6 +127,9 @@ void unregister_candev(struct net_device *dev);
121int can_restart_now(struct net_device *dev); 127int can_restart_now(struct net_device *dev);
122void can_bus_off(struct net_device *dev); 128void can_bus_off(struct net_device *dev);
123 129
130void can_change_state(struct net_device *dev, struct can_frame *cf,
131 enum can_state tx_state, enum can_state rx_state);
132
124void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, 133void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
125 unsigned int idx); 134 unsigned int idx);
126unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); 135unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 1d5196889048..da0dae0600e6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css)
113} 113}
114 114
115/** 115/**
116 * css_get_many - obtain references on the specified css
117 * @css: target css
118 * @n: number of references to get
119 *
120 * The caller must already have a reference.
121 */
122static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
123{
124 if (!(css->flags & CSS_NO_REF))
125 percpu_ref_get_many(&css->refcnt, n);
126}
127
128/**
116 * css_tryget - try to obtain a reference on the specified css 129 * css_tryget - try to obtain a reference on the specified css
117 * @css: target css 130 * @css: target css
118 * 131 *
@@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css)
159 percpu_ref_put(&css->refcnt); 172 percpu_ref_put(&css->refcnt);
160} 173}
161 174
175/**
176 * css_put_many - put css references
177 * @css: target css
178 * @n: number of references to put
179 *
180 * Put references obtained via css_get() and css_tryget_online().
181 */
182static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
183{
184 if (!(css->flags & CSS_NO_REF))
185 percpu_ref_put_many(&css->refcnt, n);
186}
187
162/* bits in struct cgroup flags field */ 188/* bits in struct cgroup flags field */
163enum { 189enum {
164 /* Control Group requires release notifications to userspace */ 190 /* Control Group requires release notifications to userspace */
@@ -367,8 +393,8 @@ struct css_set {
367 * struct cftype: handler definitions for cgroup control files 393 * struct cftype: handler definitions for cgroup control files
368 * 394 *
369 * When reading/writing to a file: 395 * When reading/writing to a file:
370 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata 396 * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
371 * - the 'cftype' of the file is file->f_dentry->d_fsdata 397 * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
372 */ 398 */
373 399
374/* cftype->flags */ 400/* cftype->flags */
@@ -612,8 +638,10 @@ struct cgroup_subsys {
612 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); 638 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
613 int (*css_online)(struct cgroup_subsys_state *css); 639 int (*css_online)(struct cgroup_subsys_state *css);
614 void (*css_offline)(struct cgroup_subsys_state *css); 640 void (*css_offline)(struct cgroup_subsys_state *css);
641 void (*css_released)(struct cgroup_subsys_state *css);
615 void (*css_free)(struct cgroup_subsys_state *css); 642 void (*css_free)(struct cgroup_subsys_state *css);
616 void (*css_reset)(struct cgroup_subsys_state *css); 643 void (*css_reset)(struct cgroup_subsys_state *css);
644 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
617 645
618 int (*can_attach)(struct cgroup_subsys_state *css, 646 int (*can_attach)(struct cgroup_subsys_state *css,
619 struct cgroup_taskset *tset); 647 struct cgroup_taskset *tset);
@@ -908,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it);
908int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); 936int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
909int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); 937int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
910 938
939struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
940 struct cgroup_subsys *ss);
911struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, 941struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
912 struct cgroup_subsys *ss); 942 struct cgroup_subsys *ss);
913 943
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index be21af149f11..2839c639f092 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -352,7 +352,6 @@ struct clk_divider {
352#define CLK_DIVIDER_READ_ONLY BIT(5) 352#define CLK_DIVIDER_READ_ONLY BIT(5)
353 353
354extern const struct clk_ops clk_divider_ops; 354extern const struct clk_ops clk_divider_ops;
355extern const struct clk_ops clk_divider_ro_ops;
356struct clk *clk_register_divider(struct device *dev, const char *name, 355struct clk *clk_register_divider(struct device *dev, const char *name,
357 const char *parent_name, unsigned long flags, 356 const char *parent_name, unsigned long flags,
358 void __iomem *reg, u8 shift, u8 width, 357 void __iomem *reg, u8 shift, u8 width,
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index f75acbf70e96..74e5341463c9 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -254,13 +254,26 @@ extern const struct clk_ops ti_clk_mux_ops;
254void omap2_init_clk_hw_omap_clocks(struct clk *clk); 254void omap2_init_clk_hw_omap_clocks(struct clk *clk);
255int omap3_noncore_dpll_enable(struct clk_hw *hw); 255int omap3_noncore_dpll_enable(struct clk_hw *hw);
256void omap3_noncore_dpll_disable(struct clk_hw *hw); 256void omap3_noncore_dpll_disable(struct clk_hw *hw);
257int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
257int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, 258int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
258 unsigned long parent_rate); 259 unsigned long parent_rate);
260int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
261 unsigned long rate,
262 unsigned long parent_rate,
263 u8 index);
264long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
265 unsigned long rate,
266 unsigned long *best_parent_rate,
267 struct clk **best_parent_clk);
259unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, 268unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
260 unsigned long parent_rate); 269 unsigned long parent_rate);
261long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, 270long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
262 unsigned long target_rate, 271 unsigned long target_rate,
263 unsigned long *parent_rate); 272 unsigned long *parent_rate);
273long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
274 unsigned long rate,
275 unsigned long *best_parent_rate,
276 struct clk **best_parent_clk);
264u8 omap2_init_dpll_parent(struct clk_hw *hw); 277u8 omap2_init_dpll_parent(struct clk_hw *hw);
265unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); 278unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
266long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, 279long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
@@ -278,6 +291,8 @@ int omap2_clk_disable_autoidle_all(void);
278void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); 291void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
279int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, 292int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
280 unsigned long parent_rate); 293 unsigned long parent_rate);
294int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
295 unsigned long parent_rate, u8 index);
281int omap2_dflt_clk_enable(struct clk_hw *hw); 296int omap2_dflt_clk_enable(struct clk_hw *hw);
282void omap2_dflt_clk_disable(struct clk_hw *hw); 297void omap2_dflt_clk_disable(struct clk_hw *hw);
283int omap2_dflt_clk_is_enabled(struct clk_hw *hw); 298int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 653f0e2b6ca9..abcafaa20b86 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
287extern void clocksource_change_rating(struct clocksource *cs, int rating); 287extern void clocksource_change_rating(struct clocksource *cs, int rating);
288extern void clocksource_suspend(void); 288extern void clocksource_suspend(void);
289extern void clocksource_resume(void); 289extern void clocksource_resume(void);
290extern struct clocksource * __init __weak clocksource_default_clock(void); 290extern struct clocksource * __init clocksource_default_clock(void);
291extern void clocksource_mark_unstable(struct clocksource *cs); 291extern void clocksource_mark_unstable(struct clocksource *cs);
292 292
293extern u64 293extern u64
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 0430ed05d3b9..a93438beb33c 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -18,12 +18,12 @@ struct cma;
18extern phys_addr_t cma_get_base(struct cma *cma); 18extern phys_addr_t cma_get_base(struct cma *cma);
19extern unsigned long cma_get_size(struct cma *cma); 19extern unsigned long cma_get_size(struct cma *cma);
20 20
21extern int __init cma_declare_contiguous(phys_addr_t size, 21extern int __init cma_declare_contiguous(phys_addr_t base,
22 phys_addr_t base, phys_addr_t limit, 22 phys_addr_t size, phys_addr_t limit,
23 phys_addr_t alignment, unsigned int order_per_bit, 23 phys_addr_t alignment, unsigned int order_per_bit,
24 bool fixed, struct cma **res_cma); 24 bool fixed, struct cma **res_cma);
25extern int cma_init_reserved_mem(phys_addr_t size, 25extern int cma_init_reserved_mem(phys_addr_t base,
26 phys_addr_t base, int order_per_bit, 26 phys_addr_t size, int order_per_bit,
27 struct cma **res_cma); 27 struct cma **res_cma);
28extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align); 28extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
29extern bool cma_release(struct cma *cma, struct page *pages, int count); 29extern bool cma_release(struct cma *cma, struct page *pages, int count);
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 60bdf8dc02a3..3238ffa33f68 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -33,10 +33,11 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
33extern unsigned long try_to_compact_pages(struct zonelist *zonelist, 33extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
34 int order, gfp_t gfp_mask, nodemask_t *mask, 34 int order, gfp_t gfp_mask, nodemask_t *mask,
35 enum migrate_mode mode, int *contended, 35 enum migrate_mode mode, int *contended,
36 struct zone **candidate_zone); 36 int alloc_flags, int classzone_idx);
37extern void compact_pgdat(pg_data_t *pgdat, int order); 37extern void compact_pgdat(pg_data_t *pgdat, int order);
38extern void reset_isolation_suitable(pg_data_t *pgdat); 38extern void reset_isolation_suitable(pg_data_t *pgdat);
39extern unsigned long compaction_suitable(struct zone *zone, int order); 39extern unsigned long compaction_suitable(struct zone *zone, int order,
40 int alloc_flags, int classzone_idx);
40 41
41/* Do not skip compaction more than 64 times */ 42/* Do not skip compaction more than 64 times */
42#define COMPACT_MAX_DEFER_SHIFT 6 43#define COMPACT_MAX_DEFER_SHIFT 6
@@ -103,7 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
103static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, 104static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
104 int order, gfp_t gfp_mask, nodemask_t *nodemask, 105 int order, gfp_t gfp_mask, nodemask_t *nodemask,
105 enum migrate_mode mode, int *contended, 106 enum migrate_mode mode, int *contended,
106 struct zone **candidate_zone) 107 int alloc_flags, int classzone_idx)
107{ 108{
108 return COMPACT_CONTINUE; 109 return COMPACT_CONTINUE;
109} 110}
@@ -116,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
116{ 117{
117} 118}
118 119
119static inline unsigned long compaction_suitable(struct zone *zone, int order) 120static inline unsigned long compaction_suitable(struct zone *zone, int order,
121 int alloc_flags, int classzone_idx)
120{ 122{
121 return COMPACT_SKIPPED; 123 return COMPACT_SKIPPED;
122} 124}
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e6494261eaff..7450ca2ac1fc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
357 357
358asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, 358asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
359 const compat_uptr_t __user *envp); 359 const compat_uptr_t __user *envp);
360asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
361 const compat_uptr_t __user *argv,
362 const compat_uptr_t __user *envp, int flags);
360 363
361asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, 364asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
362 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 365 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 2507fd2a1eb4..d1a558239b1a 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -71,7 +71,6 @@
71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
72 * 72 *
73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. 73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
74 * Fixed in GCC 4.8.2 and later versions.
75 * 74 *
76 * (asm goto is automatically volatile - the naming reflects this.) 75 * (asm goto is automatically volatile - the naming reflects this.)
77 */ 76 */
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
index cdd1cc202d51..c8c565952548 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
@@ -53,7 +53,6 @@
53 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 53 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
54 * 54 *
55 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. 55 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
56 * Fixed in GCC 4.8.2 and later versions.
57 * 56 *
58 * (asm goto is automatically volatile - the naming reflects this.) 57 * (asm goto is automatically volatile - the naming reflects this.)
59 */ 58 */
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
new file mode 100644
index 000000000000..0414009e2c30
--- /dev/null
+++ b/include/linux/cpufreq-dt.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2014 Marvell
3 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __CPUFREQ_DT_H__
11#define __CPUFREQ_DT_H__
12
13struct cpufreq_dt_platform_data {
14 /*
15 * True when each CPU has its own clock to control its
16 * frequency, false when all CPUs are controlled by a single
17 * clock.
18 */
19 bool independent_clocks;
20};
21
22#endif /* __CPUFREQ_DT_H__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 138336b6bb04..4d078cebafd2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -217,25 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
217 217
218 218
219struct cpufreq_driver { 219struct cpufreq_driver {
220 char name[CPUFREQ_NAME_LEN]; 220 char name[CPUFREQ_NAME_LEN];
221 u8 flags; 221 u8 flags;
222 void *driver_data;
222 223
223 /* needed by all drivers */ 224 /* needed by all drivers */
224 int (*init) (struct cpufreq_policy *policy); 225 int (*init)(struct cpufreq_policy *policy);
225 int (*verify) (struct cpufreq_policy *policy); 226 int (*verify)(struct cpufreq_policy *policy);
226 227
227 /* define one out of two */ 228 /* define one out of two */
228 int (*setpolicy) (struct cpufreq_policy *policy); 229 int (*setpolicy)(struct cpufreq_policy *policy);
229 230
230 /* 231 /*
231 * On failure, should always restore frequency to policy->restore_freq 232 * On failure, should always restore frequency to policy->restore_freq
232 * (i.e. old freq). 233 * (i.e. old freq).
233 */ 234 */
234 int (*target) (struct cpufreq_policy *policy, /* Deprecated */ 235 int (*target)(struct cpufreq_policy *policy,
235 unsigned int target_freq, 236 unsigned int target_freq,
236 unsigned int relation); 237 unsigned int relation); /* Deprecated */
237 int (*target_index) (struct cpufreq_policy *policy, 238 int (*target_index)(struct cpufreq_policy *policy,
238 unsigned int index); 239 unsigned int index);
239 /* 240 /*
240 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION 241 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
241 * unset. 242 * unset.
@@ -251,27 +252,31 @@ struct cpufreq_driver {
251 * wish to switch to intermediate frequency for some target frequency. 252 * wish to switch to intermediate frequency for some target frequency.
252 * In that case core will directly call ->target_index(). 253 * In that case core will directly call ->target_index().
253 */ 254 */
254 unsigned int (*get_intermediate)(struct cpufreq_policy *policy, 255 unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
255 unsigned int index); 256 unsigned int index);
256 int (*target_intermediate)(struct cpufreq_policy *policy, 257 int (*target_intermediate)(struct cpufreq_policy *policy,
257 unsigned int index); 258 unsigned int index);
258 259
259 /* should be defined, if possible */ 260 /* should be defined, if possible */
260 unsigned int (*get) (unsigned int cpu); 261 unsigned int (*get)(unsigned int cpu);
261 262
262 /* optional */ 263 /* optional */
263 int (*bios_limit) (int cpu, unsigned int *limit); 264 int (*bios_limit)(int cpu, unsigned int *limit);
265
266 int (*exit)(struct cpufreq_policy *policy);
267 void (*stop_cpu)(struct cpufreq_policy *policy);
268 int (*suspend)(struct cpufreq_policy *policy);
269 int (*resume)(struct cpufreq_policy *policy);
270
271 /* Will be called after the driver is fully initialized */
272 void (*ready)(struct cpufreq_policy *policy);
264 273
265 int (*exit) (struct cpufreq_policy *policy); 274 struct freq_attr **attr;
266 void (*stop_cpu) (struct cpufreq_policy *policy);
267 int (*suspend) (struct cpufreq_policy *policy);
268 int (*resume) (struct cpufreq_policy *policy);
269 struct freq_attr **attr;
270 275
271 /* platform specific boost support code */ 276 /* platform specific boost support code */
272 bool boost_supported; 277 bool boost_supported;
273 bool boost_enabled; 278 bool boost_enabled;
274 int (*set_boost) (int state); 279 int (*set_boost)(int state);
275}; 280};
276 281
277/* flags */ 282/* flags */
@@ -312,6 +317,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data);
312int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); 317int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
313 318
314const char *cpufreq_get_current_driver(void); 319const char *cpufreq_get_current_driver(void);
320void *cpufreq_get_driver_data(void);
315 321
316static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, 322static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
317 unsigned int min, unsigned int max) 323 unsigned int min, unsigned int max)
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 25e0df6155a4..a07e087f54b2 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,7 +53,7 @@ struct cpuidle_state {
53}; 53};
54 54
55/* Idle State Flags */ 55/* Idle State Flags */
56#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ 56#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */
57#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ 57#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
58#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ 58#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
59 59
@@ -90,7 +90,7 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
90 * cpuidle_get_last_residency - retrieves the last state's residency time 90 * cpuidle_get_last_residency - retrieves the last state's residency time
91 * @dev: the target CPU 91 * @dev: the target CPU
92 * 92 *
93 * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set 93 * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
94 */ 94 */
95static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) 95static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
96{ 96{
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2f073db7392e..1b357997cac5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
48void cpuset_init_current_mems_allowed(void); 48void cpuset_init_current_mems_allowed(void);
49int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); 49int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
50 50
51extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); 51extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
52extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
53 52
54static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) 53static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
55{ 54{
56 return nr_cpusets() <= 1 || 55 return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
57 __cpuset_node_allowed_softwall(node, gfp_mask);
58} 56}
59 57
60static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) 58static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
61{ 59{
62 return nr_cpusets() <= 1 || 60 return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
63 __cpuset_node_allowed_hardwall(node, gfp_mask);
64}
65
66static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
67{
68 return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
69}
70
71static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
72{
73 return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
74} 61}
75 62
76extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 63extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
179 return 1; 166 return 1;
180} 167}
181 168
182static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) 169static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
183{
184 return 1;
185}
186
187static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
188{
189 return 1;
190}
191
192static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
193{ 170{
194 return 1; 171 return 1;
195} 172}
196 173
197static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) 174static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
198{ 175{
199 return 1; 176 return 1;
200} 177}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 72ab536ad3de..3849fce7ecfe 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -14,14 +14,13 @@
14extern unsigned long long elfcorehdr_addr; 14extern unsigned long long elfcorehdr_addr;
15extern unsigned long long elfcorehdr_size; 15extern unsigned long long elfcorehdr_size;
16 16
17extern int __weak elfcorehdr_alloc(unsigned long long *addr, 17extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
18 unsigned long long *size); 18extern void elfcorehdr_free(unsigned long long addr);
19extern void __weak elfcorehdr_free(unsigned long long addr); 19extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
20extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos); 20extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
21extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); 21extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
22extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, 22 unsigned long from, unsigned long pfn,
23 unsigned long from, unsigned long pfn, 23 unsigned long size, pgprot_t prot);
24 unsigned long size, pgprot_t prot);
25 24
26extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 25extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
27 unsigned long, int); 26 unsigned long, int);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d45e949699ea..9c8776d0ada8 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -26,6 +26,19 @@
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27 27
28/* 28/*
29 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
30 * arbitrary modules to be loaded. Loading from userspace may still need the
31 * unprefixed names, so retains those aliases as well.
32 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
33 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
34 * expands twice on the same line. Instead, use a separate base name for the
35 * alias.
36 */
37#define MODULE_ALIAS_CRYPTO(name) \
38 __MODULE_INFO(alias, alias_userspace, name); \
39 __MODULE_INFO(alias, alias_crypto, "crypto-" name)
40
41/*
29 * Algorithm masks and types. 42 * Algorithm masks and types.
30 */ 43 */
31#define CRYPTO_ALG_TYPE_MASK 0x0000000f 44#define CRYPTO_ALG_TYPE_MASK 0x0000000f
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request;
127 140
128typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); 141typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
129 142
143/**
144 * DOC: Block Cipher Context Data Structures
145 *
146 * These data structures define the operating context for each block cipher
147 * type.
148 */
149
130struct crypto_async_request { 150struct crypto_async_request {
131 struct list_head list; 151 struct list_head list;
132 crypto_completion_t complete; 152 crypto_completion_t complete;
@@ -194,9 +214,63 @@ struct hash_desc {
194 u32 flags; 214 u32 flags;
195}; 215};
196 216
197/* 217/**
198 * Algorithms: modular crypto algorithm implementations, managed 218 * DOC: Block Cipher Algorithm Definitions
199 * via crypto_register_alg() and crypto_unregister_alg(). 219 *
220 * These data structures define modular crypto algorithm implementations,
221 * managed via crypto_register_alg() and crypto_unregister_alg().
222 */
223
224/**
225 * struct ablkcipher_alg - asynchronous block cipher definition
226 * @min_keysize: Minimum key size supported by the transformation. This is the
227 * smallest key length supported by this transformation algorithm.
228 * This must be set to one of the pre-defined values as this is
229 * not hardware specific. Possible values for this field can be
230 * found via git grep "_MIN_KEY_SIZE" include/crypto/
231 * @max_keysize: Maximum key size supported by the transformation. This is the
232 * largest key length supported by this transformation algorithm.
233 * This must be set to one of the pre-defined values as this is
234 * not hardware specific. Possible values for this field can be
235 * found via git grep "_MAX_KEY_SIZE" include/crypto/
236 * @setkey: Set key for the transformation. This function is used to either
237 * program a supplied key into the hardware or store the key in the
238 * transformation context for programming it later. Note that this
239 * function does modify the transformation context. This function can
240 * be called multiple times during the existence of the transformation
241 * object, so one must make sure the key is properly reprogrammed into
242 * the hardware. This function is also responsible for checking the key
243 * length for validity. In case a software fallback was put in place in
244 * the @cra_init call, this function might need to use the fallback if
245 * the algorithm doesn't support all of the key sizes.
246 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
247 * the supplied scatterlist containing the blocks of data. The crypto
248 * API consumer is responsible for aligning the entries of the
249 * scatterlist properly and making sure the chunks are correctly
250 * sized. In case a software fallback was put in place in the
251 * @cra_init call, this function might need to use the fallback if
252 * the algorithm doesn't support all of the key sizes. In case the
253 * key was stored in transformation context, the key might need to be
254 * re-programmed into the hardware in this function. This function
255 * shall not modify the transformation context, as this function may
256 * be called in parallel with the same transformation object.
257 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
258 * and the conditions are exactly the same.
259 * @givencrypt: Update the IV for encryption. With this function, a cipher
260 * implementation may provide the function on how to update the IV
261 * for encryption.
262 * @givdecrypt: Update the IV for decryption. This is the reverse of
263 * @givencrypt .
264 * @geniv: The transformation implementation may use an "IV generator" provided
265 * by the kernel crypto API. Several use cases have a predefined
266 * approach how IVs are to be updated. For such use cases, the kernel
267 * crypto API provides ready-to-use implementations that can be
268 * referenced with this variable.
269 * @ivsize: IV size applicable for transformation. The consumer must provide an
270 * IV of exactly that size to perform the encrypt or decrypt operation.
271 *
272 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
273 * mandatory and must be filled.
200 */ 274 */
201struct ablkcipher_alg { 275struct ablkcipher_alg {
202 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, 276 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -213,6 +287,32 @@ struct ablkcipher_alg {
213 unsigned int ivsize; 287 unsigned int ivsize;
214}; 288};
215 289
290/**
291 * struct aead_alg - AEAD cipher definition
292 * @maxauthsize: Set the maximum authentication tag size supported by the
293 * transformation. A transformation may support smaller tag sizes.
294 * As the authentication tag is a message digest to ensure the
295 * integrity of the encrypted data, a consumer typically wants the
296 * largest authentication tag possible as defined by this
297 * variable.
298 * @setauthsize: Set authentication size for the AEAD transformation. This
299 * function is used to specify the consumer requested size of the
300 * authentication tag to be either generated by the transformation
301 * during encryption or the size of the authentication tag to be
302 * supplied during the decryption operation. This function is also
303 * responsible for checking the authentication tag size for
304 * validity.
305 * @setkey: see struct ablkcipher_alg
306 * @encrypt: see struct ablkcipher_alg
307 * @decrypt: see struct ablkcipher_alg
308 * @givencrypt: see struct ablkcipher_alg
309 * @givdecrypt: see struct ablkcipher_alg
310 * @geniv: see struct ablkcipher_alg
311 * @ivsize: see struct ablkcipher_alg
312 *
313 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
314 * mandatory and must be filled.
315 */
216struct aead_alg { 316struct aead_alg {
217 int (*setkey)(struct crypto_aead *tfm, const u8 *key, 317 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
218 unsigned int keylen); 318 unsigned int keylen);
@@ -228,6 +328,18 @@ struct aead_alg {
228 unsigned int maxauthsize; 328 unsigned int maxauthsize;
229}; 329};
230 330
331/**
332 * struct blkcipher_alg - synchronous block cipher definition
333 * @min_keysize: see struct ablkcipher_alg
334 * @max_keysize: see struct ablkcipher_alg
335 * @setkey: see struct ablkcipher_alg
336 * @encrypt: see struct ablkcipher_alg
337 * @decrypt: see struct ablkcipher_alg
338 * @geniv: see struct ablkcipher_alg
339 * @ivsize: see struct ablkcipher_alg
340 *
341 * All fields except @geniv and @ivsize are mandatory and must be filled.
342 */
231struct blkcipher_alg { 343struct blkcipher_alg {
232 int (*setkey)(struct crypto_tfm *tfm, const u8 *key, 344 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
233 unsigned int keylen); 345 unsigned int keylen);
@@ -245,6 +357,53 @@ struct blkcipher_alg {
245 unsigned int ivsize; 357 unsigned int ivsize;
246}; 358};
247 359
360/**
361 * struct cipher_alg - single-block symmetric ciphers definition
362 * @cia_min_keysize: Minimum key size supported by the transformation. This is
363 * the smallest key length supported by this transformation
364 * algorithm. This must be set to one of the pre-defined
365 * values as this is not hardware specific. Possible values
366 * for this field can be found via git grep "_MIN_KEY_SIZE"
367 * include/crypto/
368 * @cia_max_keysize: Maximum key size supported by the transformation. This is
369 * the largest key length supported by this transformation
370 * algorithm. This must be set to one of the pre-defined values
371 * as this is not hardware specific. Possible values for this
372 * field can be found via git grep "_MAX_KEY_SIZE"
373 * include/crypto/
374 * @cia_setkey: Set key for the transformation. This function is used to either
375 * program a supplied key into the hardware or store the key in the
376 * transformation context for programming it later. Note that this
377 * function does modify the transformation context. This function
378 * can be called multiple times during the existence of the
379 * transformation object, so one must make sure the key is properly
380 * reprogrammed into the hardware. This function is also
381 * responsible for checking the key length for validity.
382 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
383 * single block of data, which must be @cra_blocksize big. This
384 * always operates on a full @cra_blocksize and it is not possible
385 * to encrypt a block of smaller size. The supplied buffers must
386 * therefore also be at least of @cra_blocksize size. Both the
387 * input and output buffers are always aligned to @cra_alignmask.
388 * In case either of the input or output buffer supplied by user
389 * of the crypto API is not aligned to @cra_alignmask, the crypto
390 * API will re-align the buffers. The re-alignment means that a
391 * new buffer will be allocated, the data will be copied into the
392 * new buffer, then the processing will happen on the new buffer,
393 * then the data will be copied back into the original buffer and
394 * finally the new buffer will be freed. In case a software
395 * fallback was put in place in the @cra_init call, this function
396 * might need to use the fallback if the algorithm doesn't support
397 * all of the key sizes. In case the key was stored in
398 * transformation context, the key might need to be re-programmed
399 * into the hardware in this function. This function shall not
400 * modify the transformation context, as this function may be
401 * called in parallel with the same transformation object.
402 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
403 * @cia_encrypt, and the conditions are exactly the same.
404 *
405 * All fields are mandatory and must be filled.
406 */
248struct cipher_alg { 407struct cipher_alg {
249 unsigned int cia_min_keysize; 408 unsigned int cia_min_keysize;
250 unsigned int cia_max_keysize; 409 unsigned int cia_max_keysize;
@@ -261,6 +420,25 @@ struct compress_alg {
261 unsigned int slen, u8 *dst, unsigned int *dlen); 420 unsigned int slen, u8 *dst, unsigned int *dlen);
262}; 421};
263 422
423/**
424 * struct rng_alg - random number generator definition
425 * @rng_make_random: The function defined by this variable obtains a random
426 * number. The random number generator transform must generate
427 * the random number out of the context provided with this
428 * call.
429 * @rng_reset: Reset of the random number generator by clearing the entire state.
430 * With the invocation of this function call, the random number
431 * generator shall completely reinitialize its state. If the random
432 * number generator requires a seed for setting up a new state,
433 * the seed must be provided by the consumer while invoking this
434 * function. The required size of the seed is defined with
435 * @seedsize .
436 * @seedsize: The seed size required for a random number generator
437 * initialization defined with this variable. Some random number
438 * generators like the SP800-90A DRBG does not require a seed as the
439 * seeding is implemented internally without the need of support by
440 * the consumer. In this case, the seed size is set to zero.
441 */
264struct rng_alg { 442struct rng_alg {
265 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, 443 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
266 unsigned int dlen); 444 unsigned int dlen);
@@ -277,6 +455,81 @@ struct rng_alg {
277#define cra_compress cra_u.compress 455#define cra_compress cra_u.compress
278#define cra_rng cra_u.rng 456#define cra_rng cra_u.rng
279 457
458/**
459 * struct crypto_alg - definition of a cryptograpic cipher algorithm
460 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
461 * CRYPTO_ALG_* flags for the flags which go in here. Those are
462 * used for fine-tuning the description of the transformation
463 * algorithm.
464 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
465 * of the smallest possible unit which can be transformed with
466 * this algorithm. The users must respect this value.
467 * In case of HASH transformation, it is possible for a smaller
468 * block than @cra_blocksize to be passed to the crypto API for
469 * transformation, in case of any other transformation type, an
470 * error will be returned upon any attempt to transform smaller
471 * than @cra_blocksize chunks.
472 * @cra_ctxsize: Size of the operational context of the transformation. This
473 * value informs the kernel crypto API about the memory size
474 * needed to be allocated for the transformation context.
475 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
476 * buffer containing the input data for the algorithm must be
477 * aligned to this alignment mask. The data buffer for the
478 * output data must be aligned to this alignment mask. Note that
479 * the Crypto API will do the re-alignment in software, but
480 * only under special conditions and there is a performance hit.
481 * The re-alignment happens at these occasions for different
482 * @cra_u types: cipher -- For both input data and output data
483 * buffer; ahash -- For output hash destination buf; shash --
484 * For output hash destination buf.
485 * This is needed on hardware which is flawed by design and
486 * cannot pick data from arbitrary addresses.
487 * @cra_priority: Priority of this transformation implementation. In case
488 * multiple transformations with same @cra_name are available to
489 * the Crypto API, the kernel will use the one with highest
490 * @cra_priority.
491 * @cra_name: Generic name (usable by multiple implementations) of the
492 * transformation algorithm. This is the name of the transformation
493 * itself. This field is used by the kernel when looking up the
494 * providers of particular transformation.
495 * @cra_driver_name: Unique name of the transformation provider. This is the
496 * name of the provider of the transformation. This can be any
497 * arbitrary value, but in the usual case, this contains the
498 * name of the chip or provider and the name of the
499 * transformation algorithm.
500 * @cra_type: Type of the cryptographic transformation. This is a pointer to
501 * struct crypto_type, which implements callbacks common for all
502 * trasnformation types. There are multiple options:
503 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
504 * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
505 * This field might be empty. In that case, there are no common
506 * callbacks. This is the case for: cipher, compress, shash.
507 * @cra_u: Callbacks implementing the transformation. This is a union of
508 * multiple structures. Depending on the type of transformation selected
509 * by @cra_type and @cra_flags above, the associated structure must be
510 * filled with callbacks. This field might be empty. This is the case
511 * for ahash, shash.
512 * @cra_init: Initialize the cryptographic transformation object. This function
513 * is used to initialize the cryptographic transformation object.
514 * This function is called only once at the instantiation time, right
515 * after the transformation context was allocated. In case the
516 * cryptographic hardware has some special requirements which need to
517 * be handled by software, this function shall check for the precise
518 * requirement of the transformation and put any software fallbacks
519 * in place.
520 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
521 * counterpart to @cra_init, used to remove various changes set in
522 * @cra_init.
523 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
524 * @cra_list: internally used
525 * @cra_users: internally used
526 * @cra_refcnt: internally used
527 * @cra_destroy: internally used
528 *
529 * The struct crypto_alg describes a generic Crypto API algorithm and is common
530 * for all of the transformations. Any variable not documented here shall not
531 * be used by a cipher implementation as it is internal to the Crypto API.
532 */
280struct crypto_alg { 533struct crypto_alg {
281 struct list_head cra_list; 534 struct list_head cra_list;
282 struct list_head cra_users; 535 struct list_head cra_users;
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask)
581 return mask; 834 return mask;
582} 835}
583 836
837/**
838 * DOC: Asynchronous Block Cipher API
839 *
840 * Asynchronous block cipher API is used with the ciphers of type
841 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
842 *
843 * Asynchronous cipher operations imply that the function invocation for a
844 * cipher request returns immediately before the completion of the operation.
845 * The cipher request is scheduled as a separate kernel thread and therefore
846 * load-balanced on the different CPUs via the process scheduler. To allow
847 * the kernel crypto API to inform the caller about the completion of a cipher
848 * request, the caller must provide a callback function. That function is
849 * invoked with the cipher handle when the request completes.
850 *
851 * To support the asynchronous operation, additional information than just the
852 * cipher handle must be supplied to the kernel crypto API. That additional
853 * information is given by filling in the ablkcipher_request data structure.
854 *
855 * For the asynchronous block cipher API, the state is maintained with the tfm
856 * cipher handle. A single tfm can be used across multiple calls and in
857 * parallel. For asynchronous block cipher calls, context data supplied and
858 * only used by the caller can be referenced the request data structure in
859 * addition to the IV used for the cipher request. The maintenance of such
860 * state information would be important for a crypto driver implementer to
861 * have, because when calling the callback function upon completion of the
862 * cipher operation, that callback function may need some information about
863 * which operation just finished if it invoked multiple in parallel. This
864 * state information is unused by the kernel crypto API.
865 */
866
867/**
868 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
869 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
870 * ablkcipher cipher
871 * @type: specifies the type of the cipher
872 * @mask: specifies the mask for the cipher
873 *
874 * Allocate a cipher handle for an ablkcipher. The returned struct
875 * crypto_ablkcipher is the cipher handle that is required for any subsequent
876 * API invocation for that ablkcipher.
877 *
878 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
879 * of an error, PTR_ERR() returns the error code.
880 */
584struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, 881struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
585 u32 type, u32 mask); 882 u32 type, u32 mask);
586 883
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm(
590 return &tfm->base; 887 return &tfm->base;
591} 888}
592 889
890/**
891 * crypto_free_ablkcipher() - zeroize and free cipher handle
892 * @tfm: cipher handle to be freed
893 */
593static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) 894static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
594{ 895{
595 crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); 896 crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
596} 897}
597 898
899/**
900 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
901 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
902 * ablkcipher
903 * @type: specifies the type of the cipher
904 * @mask: specifies the mask for the cipher
905 *
906 * Return: true when the ablkcipher is known to the kernel crypto API; false
907 * otherwise
908 */
598static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, 909static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
599 u32 mask) 910 u32 mask)
600{ 911{
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
608 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; 919 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
609} 920}
610 921
922/**
923 * crypto_ablkcipher_ivsize() - obtain IV size
924 * @tfm: cipher handle
925 *
926 * The size of the IV for the ablkcipher referenced by the cipher handle is
927 * returned. This IV size may be zero if the cipher does not need an IV.
928 *
929 * Return: IV size in bytes
930 */
611static inline unsigned int crypto_ablkcipher_ivsize( 931static inline unsigned int crypto_ablkcipher_ivsize(
612 struct crypto_ablkcipher *tfm) 932 struct crypto_ablkcipher *tfm)
613{ 933{
614 return crypto_ablkcipher_crt(tfm)->ivsize; 934 return crypto_ablkcipher_crt(tfm)->ivsize;
615} 935}
616 936
937/**
938 * crypto_ablkcipher_blocksize() - obtain block size of cipher
939 * @tfm: cipher handle
940 *
941 * The block size for the ablkcipher referenced with the cipher handle is
942 * returned. The caller may use that information to allocate appropriate
943 * memory for the data returned by the encryption or decryption operation
944 *
945 * Return: block size of cipher
946 */
617static inline unsigned int crypto_ablkcipher_blocksize( 947static inline unsigned int crypto_ablkcipher_blocksize(
618 struct crypto_ablkcipher *tfm) 948 struct crypto_ablkcipher *tfm)
619{ 949{
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
643 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); 973 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
644} 974}
645 975
976/**
977 * crypto_ablkcipher_setkey() - set key for cipher
978 * @tfm: cipher handle
979 * @key: buffer holding the key
980 * @keylen: length of the key in bytes
981 *
982 * The caller provided key is set for the ablkcipher referenced by the cipher
983 * handle.
984 *
985 * Note, the key length determines the cipher type. Many block ciphers implement
986 * different cipher modes depending on the key size, such as AES-128 vs AES-192
987 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
988 * is performed.
989 *
990 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
991 */
646static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 992static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
647 const u8 *key, unsigned int keylen) 993 const u8 *key, unsigned int keylen)
648{ 994{
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
651 return crt->setkey(crt->base, key, keylen); 997 return crt->setkey(crt->base, key, keylen);
652} 998}
653 999
1000/**
1001 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
1002 * @req: ablkcipher_request out of which the cipher handle is to be obtained
1003 *
1004 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
1005 * data structure.
1006 *
1007 * Return: crypto_ablkcipher handle
1008 */
654static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( 1009static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
655 struct ablkcipher_request *req) 1010 struct ablkcipher_request *req)
656{ 1011{
657 return __crypto_ablkcipher_cast(req->base.tfm); 1012 return __crypto_ablkcipher_cast(req->base.tfm);
658} 1013}
659 1014
1015/**
1016 * crypto_ablkcipher_encrypt() - encrypt plaintext
1017 * @req: reference to the ablkcipher_request handle that holds all information
1018 * needed to perform the cipher operation
1019 *
1020 * Encrypt plaintext data using the ablkcipher_request handle. That data
1021 * structure and how it is filled with data is discussed with the
1022 * ablkcipher_request_* functions.
1023 *
1024 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1025 */
660static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) 1026static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
661{ 1027{
662 struct ablkcipher_tfm *crt = 1028 struct ablkcipher_tfm *crt =
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
664 return crt->encrypt(req); 1030 return crt->encrypt(req);
665} 1031}
666 1032
1033/**
1034 * crypto_ablkcipher_decrypt() - decrypt ciphertext
1035 * @req: reference to the ablkcipher_request handle that holds all information
1036 * needed to perform the cipher operation
1037 *
1038 * Decrypt ciphertext data using the ablkcipher_request handle. That data
1039 * structure and how it is filled with data is discussed with the
1040 * ablkcipher_request_* functions.
1041 *
1042 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1043 */
667static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) 1044static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
668{ 1045{
669 struct ablkcipher_tfm *crt = 1046 struct ablkcipher_tfm *crt =
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
671 return crt->decrypt(req); 1048 return crt->decrypt(req);
672} 1049}
673 1050
1051/**
1052 * DOC: Asynchronous Cipher Request Handle
1053 *
1054 * The ablkcipher_request data structure contains all pointers to data
1055 * required for the asynchronous cipher operation. This includes the cipher
1056 * handle (which can be used by multiple ablkcipher_request instances), pointer
1057 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
1058 * as a handle to the ablkcipher_request_* API calls in a similar way as
1059 * ablkcipher handle to the crypto_ablkcipher_* API calls.
1060 */
1061
1062/**
1063 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
1064 * @tfm: cipher handle
1065 *
1066 * Return: number of bytes
1067 */
674static inline unsigned int crypto_ablkcipher_reqsize( 1068static inline unsigned int crypto_ablkcipher_reqsize(
675 struct crypto_ablkcipher *tfm) 1069 struct crypto_ablkcipher *tfm)
676{ 1070{
677 return crypto_ablkcipher_crt(tfm)->reqsize; 1071 return crypto_ablkcipher_crt(tfm)->reqsize;
678} 1072}
679 1073
1074/**
1075 * ablkcipher_request_set_tfm() - update cipher handle reference in request
1076 * @req: request handle to be modified
1077 * @tfm: cipher handle that shall be added to the request handle
1078 *
1079 * Allow the caller to replace the existing ablkcipher handle in the request
1080 * data structure with a different one.
1081 */
680static inline void ablkcipher_request_set_tfm( 1082static inline void ablkcipher_request_set_tfm(
681 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) 1083 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
682{ 1084{
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
689 return container_of(req, struct ablkcipher_request, base); 1091 return container_of(req, struct ablkcipher_request, base);
690} 1092}
691 1093
1094/**
1095 * ablkcipher_request_alloc() - allocate request data structure
1096 * @tfm: cipher handle to be registered with the request
1097 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1098 *
1099 * Allocate the request data structure that must be used with the ablkcipher
1100 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
1101 * handle is registered in the request data structure.
1102 *
1103 * Return: allocated request handle in case of success; IS_ERR() is true in case
1104 * of an error, PTR_ERR() returns the error code.
1105 */
692static inline struct ablkcipher_request *ablkcipher_request_alloc( 1106static inline struct ablkcipher_request *ablkcipher_request_alloc(
693 struct crypto_ablkcipher *tfm, gfp_t gfp) 1107 struct crypto_ablkcipher *tfm, gfp_t gfp)
694{ 1108{
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
703 return req; 1117 return req;
704} 1118}
705 1119
1120/**
1121 * ablkcipher_request_free() - zeroize and free request data structure
1122 * @req: request data structure cipher handle to be freed
1123 */
706static inline void ablkcipher_request_free(struct ablkcipher_request *req) 1124static inline void ablkcipher_request_free(struct ablkcipher_request *req)
707{ 1125{
708 kzfree(req); 1126 kzfree(req);
709} 1127}
710 1128
1129/**
1130 * ablkcipher_request_set_callback() - set asynchronous callback function
1131 * @req: request handle
1132 * @flags: specify zero or an ORing of the flags
1133 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1134 * increase the wait queue beyond the initial maximum size;
1135 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1136 * @compl: callback function pointer to be registered with the request handle
1137 * @data: The data pointer refers to memory that is not used by the kernel
1138 * crypto API, but provided to the callback function for it to use. Here,
1139 * the caller can provide a reference to memory the callback function can
1140 * operate on. As the callback function is invoked asynchronously to the
1141 * related functionality, it may need to access data structures of the
1142 * related functionality which can be referenced using this pointer. The
1143 * callback function can access the memory via the "data" field in the
1144 * crypto_async_request data structure provided to the callback function.
1145 *
1146 * This function allows setting the callback function that is triggered once the
1147 * cipher operation completes.
1148 *
1149 * The callback function is registered with the ablkcipher_request handle and
1150 * must comply with the following template:
1151 *
1152 * void callback_function(struct crypto_async_request *req, int error)
1153 */
711static inline void ablkcipher_request_set_callback( 1154static inline void ablkcipher_request_set_callback(
712 struct ablkcipher_request *req, 1155 struct ablkcipher_request *req,
713 u32 flags, crypto_completion_t compl, void *data) 1156 u32 flags, crypto_completion_t compl, void *data)
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback(
717 req->base.flags = flags; 1160 req->base.flags = flags;
718} 1161}
719 1162
1163/**
1164 * ablkcipher_request_set_crypt() - set data buffers
1165 * @req: request handle
1166 * @src: source scatter / gather list
1167 * @dst: destination scatter / gather list
1168 * @nbytes: number of bytes to process from @src
1169 * @iv: IV for the cipher operation which must comply with the IV size defined
1170 * by crypto_ablkcipher_ivsize
1171 *
1172 * This function allows setting of the source data and destination data
1173 * scatter / gather lists.
1174 *
1175 * For encryption, the source is treated as the plaintext and the
1176 * destination is the ciphertext. For a decryption operation, the use is
1177 * reversed: the source is the ciphertext and the destination is the plaintext.
1178 */
720static inline void ablkcipher_request_set_crypt( 1179static inline void ablkcipher_request_set_crypt(
721 struct ablkcipher_request *req, 1180 struct ablkcipher_request *req,
722 struct scatterlist *src, struct scatterlist *dst, 1181 struct scatterlist *src, struct scatterlist *dst,
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt(
728 req->info = iv; 1187 req->info = iv;
729} 1188}
730 1189
1190/**
1191 * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
1192 *
1193 * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
1194 * (listed as type "aead" in /proc/crypto)
1195 *
1196 * The most prominent examples for this type of encryption is GCM and CCM.
1197 * However, the kernel supports other types of AEAD ciphers which are defined
1198 * with the following cipher string:
1199 *
1200 * authenc(keyed message digest, block cipher)
1201 *
1202 * For example: authenc(hmac(sha256), cbc(aes))
1203 *
1204 * The example code provided for the asynchronous block cipher operation
1205 * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
 1206 * with the *aead* pendants discussed in the following. In addition, for the AEAD
1207 * operation, the aead_request_set_assoc function must be used to set the
1208 * pointer to the associated data memory location before performing the
1209 * encryption or decryption operation. In case of an encryption, the associated
1210 * data memory is filled during the encryption operation. For decryption, the
1211 * associated data memory must contain data that is used to verify the integrity
1212 * of the decrypted data. Another deviation from the asynchronous block cipher
1213 * operation is that the caller should explicitly check for -EBADMSG of the
1214 * crypto_aead_decrypt. That error indicates an authentication error, i.e.
1215 * a breach in the integrity of the message. In essence, that -EBADMSG error
1216 * code is the key bonus an AEAD cipher has over "standard" block chaining
1217 * modes.
1218 */
1219
731static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) 1220static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
732{ 1221{
733 return (struct crypto_aead *)tfm; 1222 return (struct crypto_aead *)tfm;
734} 1223}
735 1224
1225/**
1226 * crypto_alloc_aead() - allocate AEAD cipher handle
1227 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1228 * AEAD cipher
1229 * @type: specifies the type of the cipher
1230 * @mask: specifies the mask for the cipher
1231 *
1232 * Allocate a cipher handle for an AEAD. The returned struct
1233 * crypto_aead is the cipher handle that is required for any subsequent
1234 * API invocation for that AEAD.
1235 *
1236 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1237 * of an error, PTR_ERR() returns the error code.
1238 */
736struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); 1239struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
737 1240
738static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) 1241static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
740 return &tfm->base; 1243 return &tfm->base;
741} 1244}
742 1245
1246/**
1247 * crypto_free_aead() - zeroize and free aead handle
1248 * @tfm: cipher handle to be freed
1249 */
743static inline void crypto_free_aead(struct crypto_aead *tfm) 1250static inline void crypto_free_aead(struct crypto_aead *tfm)
744{ 1251{
745 crypto_free_tfm(crypto_aead_tfm(tfm)); 1252 crypto_free_tfm(crypto_aead_tfm(tfm));
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
750 return &crypto_aead_tfm(tfm)->crt_aead; 1257 return &crypto_aead_tfm(tfm)->crt_aead;
751} 1258}
752 1259
1260/**
1261 * crypto_aead_ivsize() - obtain IV size
1262 * @tfm: cipher handle
1263 *
1264 * The size of the IV for the aead referenced by the cipher handle is
1265 * returned. This IV size may be zero if the cipher does not need an IV.
1266 *
1267 * Return: IV size in bytes
1268 */
753static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) 1269static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
754{ 1270{
755 return crypto_aead_crt(tfm)->ivsize; 1271 return crypto_aead_crt(tfm)->ivsize;
756} 1272}
757 1273
1274/**
1275 * crypto_aead_authsize() - obtain maximum authentication data size
1276 * @tfm: cipher handle
1277 *
1278 * The maximum size of the authentication data for the AEAD cipher referenced
1279 * by the AEAD cipher handle is returned. The authentication data size may be
1280 * zero if the cipher implements a hard-coded maximum.
1281 *
1282 * The authentication data may also be known as "tag value".
1283 *
1284 * Return: authentication data size / tag size in bytes
1285 */
758static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) 1286static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
759{ 1287{
760 return crypto_aead_crt(tfm)->authsize; 1288 return crypto_aead_crt(tfm)->authsize;
761} 1289}
762 1290
1291/**
1292 * crypto_aead_blocksize() - obtain block size of cipher
1293 * @tfm: cipher handle
1294 *
1295 * The block size for the AEAD referenced with the cipher handle is returned.
1296 * The caller may use that information to allocate appropriate memory for the
1297 * data returned by the encryption or decryption operation
1298 *
1299 * Return: block size of cipher
1300 */
763static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) 1301static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
764{ 1302{
765 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); 1303 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
785 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); 1323 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
786} 1324}
787 1325
1326/**
1327 * crypto_aead_setkey() - set key for cipher
1328 * @tfm: cipher handle
1329 * @key: buffer holding the key
1330 * @keylen: length of the key in bytes
1331 *
1332 * The caller provided key is set for the AEAD referenced by the cipher
1333 * handle.
1334 *
1335 * Note, the key length determines the cipher type. Many block ciphers implement
1336 * different cipher modes depending on the key size, such as AES-128 vs AES-192
1337 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1338 * is performed.
1339 *
1340 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1341 */
788static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, 1342static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
789 unsigned int keylen) 1343 unsigned int keylen)
790{ 1344{
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
793 return crt->setkey(crt->base, key, keylen); 1347 return crt->setkey(crt->base, key, keylen);
794} 1348}
795 1349
1350/**
1351 * crypto_aead_setauthsize() - set authentication data size
1352 * @tfm: cipher handle
1353 * @authsize: size of the authentication data / tag in bytes
1354 *
1355 * Set the authentication data size / tag size. AEAD requires an authentication
1356 * tag (or MAC) in addition to the associated data.
1357 *
1358 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1359 */
796int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); 1360int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
797 1361
798static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) 1362static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
800 return __crypto_aead_cast(req->base.tfm); 1364 return __crypto_aead_cast(req->base.tfm);
801} 1365}
802 1366
1367/**
1368 * crypto_aead_encrypt() - encrypt plaintext
1369 * @req: reference to the aead_request handle that holds all information
1370 * needed to perform the cipher operation
1371 *
1372 * Encrypt plaintext data using the aead_request handle. That data structure
1373 * and how it is filled with data is discussed with the aead_request_*
1374 * functions.
1375 *
1376 * IMPORTANT NOTE The encryption operation creates the authentication data /
1377 * tag. That data is concatenated with the created ciphertext.
1378 * The ciphertext memory size is therefore the given number of
1379 * block cipher blocks + the size defined by the
1380 * crypto_aead_setauthsize invocation. The caller must ensure
1381 * that sufficient memory is available for the ciphertext and
1382 * the authentication tag.
1383 *
1384 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1385 */
803static inline int crypto_aead_encrypt(struct aead_request *req) 1386static inline int crypto_aead_encrypt(struct aead_request *req)
804{ 1387{
805 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); 1388 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
806} 1389}
807 1390
1391/**
1392 * crypto_aead_decrypt() - decrypt ciphertext
1393 * @req: reference to the ablkcipher_request handle that holds all information
1394 * needed to perform the cipher operation
1395 *
1396 * Decrypt ciphertext data using the aead_request handle. That data structure
1397 * and how it is filled with data is discussed with the aead_request_*
1398 * functions.
1399 *
1400 * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
1401 * authentication data / tag. That authentication data / tag
1402 * must have the size defined by the crypto_aead_setauthsize
1403 * invocation.
1404 *
1405 *
1406 * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
1407 * cipher operation performs the authentication of the data during the
1408 * decryption operation. Therefore, the function returns this error if
1409 * the authentication of the ciphertext was unsuccessful (i.e. the
1410 * integrity of the ciphertext or the associated data was violated);
1411 * < 0 if an error occurred.
1412 */
808static inline int crypto_aead_decrypt(struct aead_request *req) 1413static inline int crypto_aead_decrypt(struct aead_request *req)
809{ 1414{
810 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); 1415 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
811} 1416}
812 1417
1418/**
1419 * DOC: Asynchronous AEAD Request Handle
1420 *
1421 * The aead_request data structure contains all pointers to data required for
1422 * the AEAD cipher operation. This includes the cipher handle (which can be
1423 * used by multiple aead_request instances), pointer to plaintext and
1424 * ciphertext, asynchronous callback function, etc. It acts as a handle to the
1425 * aead_request_* API calls in a similar way as AEAD handle to the
1426 * crypto_aead_* API calls.
1427 */
1428
1429/**
1430 * crypto_aead_reqsize() - obtain size of the request data structure
1431 * @tfm: cipher handle
1432 *
1433 * Return: number of bytes
1434 */
813static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) 1435static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
814{ 1436{
815 return crypto_aead_crt(tfm)->reqsize; 1437 return crypto_aead_crt(tfm)->reqsize;
816} 1438}
817 1439
1440/**
1441 * aead_request_set_tfm() - update cipher handle reference in request
1442 * @req: request handle to be modified
1443 * @tfm: cipher handle that shall be added to the request handle
1444 *
1445 * Allow the caller to replace the existing aead handle in the request
1446 * data structure with a different one.
1447 */
818static inline void aead_request_set_tfm(struct aead_request *req, 1448static inline void aead_request_set_tfm(struct aead_request *req,
819 struct crypto_aead *tfm) 1449 struct crypto_aead *tfm)
820{ 1450{
821 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); 1451 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
822} 1452}
823 1453
1454/**
1455 * aead_request_alloc() - allocate request data structure
1456 * @tfm: cipher handle to be registered with the request
1457 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1458 *
1459 * Allocate the request data structure that must be used with the AEAD
1460 * encrypt and decrypt API calls. During the allocation, the provided aead
1461 * handle is registered in the request data structure.
1462 *
1463 * Return: allocated request handle in case of success; IS_ERR() is true in case
1464 * of an error, PTR_ERR() returns the error code.
1465 */
824static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, 1466static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
825 gfp_t gfp) 1467 gfp_t gfp)
826{ 1468{
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
834 return req; 1476 return req;
835} 1477}
836 1478
1479/**
1480 * aead_request_free() - zeroize and free request data structure
1481 * @req: request data structure cipher handle to be freed
1482 */
837static inline void aead_request_free(struct aead_request *req) 1483static inline void aead_request_free(struct aead_request *req)
838{ 1484{
839 kzfree(req); 1485 kzfree(req);
840} 1486}
841 1487
1488/**
1489 * aead_request_set_callback() - set asynchronous callback function
1490 * @req: request handle
1491 * @flags: specify zero or an ORing of the flags
1492 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1493 * increase the wait queue beyond the initial maximum size;
1494 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1495 * @compl: callback function pointer to be registered with the request handle
1496 * @data: The data pointer refers to memory that is not used by the kernel
1497 * crypto API, but provided to the callback function for it to use. Here,
1498 * the caller can provide a reference to memory the callback function can
1499 * operate on. As the callback function is invoked asynchronously to the
1500 * related functionality, it may need to access data structures of the
1501 * related functionality which can be referenced using this pointer. The
1502 * callback function can access the memory via the "data" field in the
1503 * crypto_async_request data structure provided to the callback function.
1504 *
1505 * Setting the callback function that is triggered once the cipher operation
1506 * completes
1507 *
1508 * The callback function is registered with the aead_request handle and
1509 * must comply with the following template:
1510 *
1511 * void callback_function(struct crypto_async_request *req, int error)
1512 */
842static inline void aead_request_set_callback(struct aead_request *req, 1513static inline void aead_request_set_callback(struct aead_request *req,
843 u32 flags, 1514 u32 flags,
844 crypto_completion_t compl, 1515 crypto_completion_t compl,
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req,
849 req->base.flags = flags; 1520 req->base.flags = flags;
850} 1521}
851 1522
1523/**
1524 * aead_request_set_crypt - set data buffers
1525 * @req: request handle
1526 * @src: source scatter / gather list
1527 * @dst: destination scatter / gather list
1528 * @cryptlen: number of bytes to process from @src
1529 * @iv: IV for the cipher operation which must comply with the IV size defined
1530 * by crypto_aead_ivsize()
1531 *
1532 * Setting the source data and destination data scatter / gather lists.
1533 *
1534 * For encryption, the source is treated as the plaintext and the
1535 * destination is the ciphertext. For a decryption operation, the use is
1536 * reversed: the source is the ciphertext and the destination is the plaintext.
1537 *
1538 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
1539 * the caller must concatenate the ciphertext followed by the
1540 * authentication tag and provide the entire data stream to the
1541 * decryption operation (i.e. the data length used for the
1542 * initialization of the scatterlist and the data length for the
1543 * decryption operation is identical). For encryption, however,
1544 * the authentication tag is created while encrypting the data.
1545 * The destination buffer must hold sufficient space for the
1546 * ciphertext and the authentication tag while the encryption
1547 * invocation must only point to the plaintext data size. The
1548 * following code snippet illustrates the memory usage
1549 * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
1550 * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
1551 * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
1552 */
852static inline void aead_request_set_crypt(struct aead_request *req, 1553static inline void aead_request_set_crypt(struct aead_request *req,
853 struct scatterlist *src, 1554 struct scatterlist *src,
854 struct scatterlist *dst, 1555 struct scatterlist *dst,
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req,
860 req->iv = iv; 1561 req->iv = iv;
861} 1562}
862 1563
1564/**
1565 * aead_request_set_assoc() - set the associated data scatter / gather list
1566 * @req: request handle
1567 * @assoc: associated data scatter / gather list
1568 * @assoclen: number of bytes to process from @assoc
1569 *
1570 * For encryption, the memory is filled with the associated data. For
1571 * decryption, the memory must point to the associated data.
1572 */
863static inline void aead_request_set_assoc(struct aead_request *req, 1573static inline void aead_request_set_assoc(struct aead_request *req,
864 struct scatterlist *assoc, 1574 struct scatterlist *assoc,
865 unsigned int assoclen) 1575 unsigned int assoclen)
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req,
868 req->assoclen = assoclen; 1578 req->assoclen = assoclen;
869} 1579}
870 1580
1581/**
1582 * DOC: Synchronous Block Cipher API
1583 *
1584 * The synchronous block cipher API is used with the ciphers of type
1585 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1586 *
1587 * Synchronous calls, have a context in the tfm. But since a single tfm can be
1588 * used in multiple calls and in parallel, this info should not be changeable
1589 * (unless a lock is used). This applies, for example, to the symmetric key.
1590 * However, the IV is changeable, so there is an iv field in blkcipher_tfm
 1591 * structure for synchronous blkcipher api. So, it's the only state info that can
1592 * be kept for synchronous calls without using a big lock across a tfm.
1593 *
1594 * The block cipher API allows the use of a complete cipher, i.e. a cipher
1595 * consisting of a template (a block chaining mode) and a single block cipher
1596 * primitive (e.g. AES).
1597 *
1598 * The plaintext data buffer and the ciphertext data buffer are pointed to
1599 * by using scatter/gather lists. The cipher operation is performed
1600 * on all segments of the provided scatter/gather lists.
1601 *
1602 * The kernel crypto API supports a cipher operation "in-place" which means that
1603 * the caller may provide the same scatter/gather list for the plaintext and
1604 * cipher text. After the completion of the cipher operation, the plaintext
1605 * data is replaced with the ciphertext data in case of an encryption and vice
1606 * versa for a decryption. The caller must ensure that the scatter/gather lists
1607 * for the output data point to sufficiently large buffers, i.e. multiples of
1608 * the block size of the cipher.
1609 */
1610
871static inline struct crypto_blkcipher *__crypto_blkcipher_cast( 1611static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
872 struct crypto_tfm *tfm) 1612 struct crypto_tfm *tfm)
873{ 1613{
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
881 return __crypto_blkcipher_cast(tfm); 1621 return __crypto_blkcipher_cast(tfm);
882} 1622}
883 1623
1624/**
1625 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1626 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1627 * blkcipher cipher
1628 * @type: specifies the type of the cipher
1629 * @mask: specifies the mask for the cipher
1630 *
1631 * Allocate a cipher handle for a block cipher. The returned struct
1632 * crypto_blkcipher is the cipher handle that is required for any subsequent
1633 * API invocation for that block cipher.
1634 *
1635 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1636 * of an error, PTR_ERR() returns the error code.
1637 */
884static inline struct crypto_blkcipher *crypto_alloc_blkcipher( 1638static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
885 const char *alg_name, u32 type, u32 mask) 1639 const char *alg_name, u32 type, u32 mask)
886{ 1640{
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm(
897 return &tfm->base; 1651 return &tfm->base;
898} 1652}
899 1653
1654/**
1655 * crypto_free_blkcipher() - zeroize and free the block cipher handle
1656 * @tfm: cipher handle to be freed
1657 */
900static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) 1658static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
901{ 1659{
902 crypto_free_tfm(crypto_blkcipher_tfm(tfm)); 1660 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
903} 1661}
904 1662
1663/**
1664 * crypto_has_blkcipher() - Search for the availability of a block cipher
1665 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1666 * block cipher
1667 * @type: specifies the type of the cipher
1668 * @mask: specifies the mask for the cipher
1669 *
1670 * Return: true when the block cipher is known to the kernel crypto API; false
1671 * otherwise
1672 */
905static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) 1673static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
906{ 1674{
907 type &= ~CRYPTO_ALG_TYPE_MASK; 1675 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
911 return crypto_has_alg(alg_name, type, mask); 1679 return crypto_has_alg(alg_name, type, mask);
912} 1680}
913 1681
1682/**
1683 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1684 * @tfm: cipher handle
1685 *
1686 * Return: The character string holding the name of the cipher
1687 */
914static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) 1688static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
915{ 1689{
916 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); 1690 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg(
928 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; 1702 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
929} 1703}
930 1704
1705/**
1706 * crypto_blkcipher_ivsize() - obtain IV size
1707 * @tfm: cipher handle
1708 *
1709 * The size of the IV for the block cipher referenced by the cipher handle is
1710 * returned. This IV size may be zero if the cipher does not need an IV.
1711 *
1712 * Return: IV size in bytes
1713 */
931static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) 1714static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
932{ 1715{
933 return crypto_blkcipher_alg(tfm)->ivsize; 1716 return crypto_blkcipher_alg(tfm)->ivsize;
934} 1717}
935 1718
1719/**
1720 * crypto_blkcipher_blocksize() - obtain block size of cipher
1721 * @tfm: cipher handle
1722 *
1723 * The block size for the block cipher referenced with the cipher handle is
1724 * returned. The caller may use that information to allocate appropriate
1725 * memory for the data returned by the encryption or decryption operation.
1726 *
1727 * Return: block size of cipher
1728 */
936static inline unsigned int crypto_blkcipher_blocksize( 1729static inline unsigned int crypto_blkcipher_blocksize(
937 struct crypto_blkcipher *tfm) 1730 struct crypto_blkcipher *tfm)
938{ 1731{
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
962 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); 1755 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
963} 1756}
964 1757
1758/**
1759 * crypto_blkcipher_setkey() - set key for cipher
1760 * @tfm: cipher handle
1761 * @key: buffer holding the key
1762 * @keylen: length of the key in bytes
1763 *
1764 * The caller provided key is set for the block cipher referenced by the cipher
1765 * handle.
1766 *
1767 * Note, the key length determines the cipher type. Many block ciphers implement
1768 * different cipher modes depending on the key size, such as AES-128 vs AES-192
1769 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1770 * is performed.
1771 *
1772 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1773 */
965static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, 1774static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
966 const u8 *key, unsigned int keylen) 1775 const u8 *key, unsigned int keylen)
967{ 1776{
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
969 key, keylen); 1778 key, keylen);
970} 1779}
971 1780
1781/**
1782 * crypto_blkcipher_encrypt() - encrypt plaintext
1783 * @desc: reference to the block cipher handle with meta data
1784 * @dst: scatter/gather list that is filled by the cipher operation with the
1785 * ciphertext
1786 * @src: scatter/gather list that holds the plaintext
1787 * @nbytes: number of bytes of the plaintext to encrypt.
1788 *
1789 * Encrypt plaintext data using the IV set by the caller with a preceding
1790 * call of crypto_blkcipher_set_iv.
1791 *
1792 * The blkcipher_desc data structure must be filled by the caller and can
1793 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1794 * with the block cipher handle; desc.flags is filled with either
1795 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1796 *
1797 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1798 */
972static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, 1799static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
973 struct scatterlist *dst, 1800 struct scatterlist *dst,
974 struct scatterlist *src, 1801 struct scatterlist *src,
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
978 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1805 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
979} 1806}
980 1807
1808/**
1809 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1810 * @desc: reference to the block cipher handle with meta data
1811 * @dst: scatter/gather list that is filled by the cipher operation with the
1812 * ciphertext
1813 * @src: scatter/gather list that holds the plaintext
1814 * @nbytes: number of bytes of the plaintext to encrypt.
1815 *
1816 * Encrypt plaintext data with the use of an IV that is solely used for this
1817 * cipher operation. Any previously set IV is not used.
1818 *
1819 * The blkcipher_desc data structure must be filled by the caller and can
1820 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1821 * with the block cipher handle; desc.info is filled with the IV to be used for
1822 * the current operation; desc.flags is filled with either
1823 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1824 *
1825 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1826 */
981static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, 1827static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
982 struct scatterlist *dst, 1828 struct scatterlist *dst,
983 struct scatterlist *src, 1829 struct scatterlist *src,
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
986 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1832 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
987} 1833}
988 1834
1835/**
1836 * crypto_blkcipher_decrypt() - decrypt ciphertext
1837 * @desc: reference to the block cipher handle with meta data
1838 * @dst: scatter/gather list that is filled by the cipher operation with the
1839 * plaintext
1840 * @src: scatter/gather list that holds the ciphertext
1841 * @nbytes: number of bytes of the ciphertext to decrypt.
1842 *
1843 * Decrypt ciphertext data using the IV set by the caller with a preceding
1844 * call of crypto_blkcipher_set_iv.
1845 *
1846 * The blkcipher_desc data structure must be filled by the caller as documented
1847 * for the crypto_blkcipher_encrypt call above.
1848 *
1849 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1850 *
1851 */
989static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, 1852static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
990 struct scatterlist *dst, 1853 struct scatterlist *dst,
991 struct scatterlist *src, 1854 struct scatterlist *src,
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
995 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1858 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
996} 1859}
997 1860
1861/**
1862 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1863 * @desc: reference to the block cipher handle with meta data
1864 * @dst: scatter/gather list that is filled by the cipher operation with the
1865 * plaintext
1866 * @src: scatter/gather list that holds the ciphertext
1867 * @nbytes: number of bytes of the ciphertext to decrypt.
1868 *
1869 * Decrypt ciphertext data with the use of an IV that is solely used for this
1870 * cipher operation. Any previously set IV is not used.
1871 *
1872 * The blkcipher_desc data structure must be filled by the caller as documented
1873 * for the crypto_blkcipher_encrypt_iv call above.
1874 *
1875 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1876 */
998static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, 1877static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
999 struct scatterlist *dst, 1878 struct scatterlist *dst,
1000 struct scatterlist *src, 1879 struct scatterlist *src,
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1003 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1882 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1004} 1883}
1005 1884
1885/**
1886 * crypto_blkcipher_set_iv() - set IV for cipher
1887 * @tfm: cipher handle
1888 * @src: buffer holding the IV
1889 * @len: length of the IV in bytes
1890 *
1891 * The caller provided IV is set for the block cipher referenced by the cipher
1892 * handle.
1893 */
1006static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, 1894static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1007 const u8 *src, unsigned int len) 1895 const u8 *src, unsigned int len)
1008{ 1896{
1009 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); 1897 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1010} 1898}
1011 1899
1900/**
1901 * crypto_blkcipher_get_iv() - obtain IV from cipher
1902 * @tfm: cipher handle
1903 * @dst: buffer filled with the IV
1904 * @len: length of the buffer dst
1905 *
1906 * The caller can obtain the IV set for the block cipher referenced by the
1907 * cipher handle and store it into the user-provided buffer. If the buffer
1908 * has an insufficient space, the IV is truncated to fit the buffer.
1909 */
1012static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, 1910static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1013 u8 *dst, unsigned int len) 1911 u8 *dst, unsigned int len)
1014{ 1912{
1015 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); 1913 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1016} 1914}
1017 1915
1916/**
1917 * DOC: Single Block Cipher API
1918 *
1919 * The single block cipher API is used with the ciphers of type
1920 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1921 *
1922 * Using the single block cipher API calls, operations with the basic cipher
1923 * primitive can be implemented. These cipher primitives exclude any block
1924 * chaining operations including IV handling.
1925 *
1926 * The purpose of this single block cipher API is to support the implementation
1927 * of templates or other concepts that only need to perform the cipher operation
1928 * on one block at a time. Templates invoke the underlying cipher primitive
1929 * block-wise and process either the input or the output data of these cipher
1930 * operations.
1931 */
1932
1018static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) 1933static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1019{ 1934{
1020 return (struct crypto_cipher *)tfm; 1935 return (struct crypto_cipher *)tfm;
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1026 return __crypto_cipher_cast(tfm); 1941 return __crypto_cipher_cast(tfm);
1027} 1942}
1028 1943
1944/**
1945 * crypto_alloc_cipher() - allocate single block cipher handle
1946 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1947 * single block cipher
1948 * @type: specifies the type of the cipher
1949 * @mask: specifies the mask for the cipher
1950 *
1951 * Allocate a cipher handle for a single block cipher. The returned struct
1952 * crypto_cipher is the cipher handle that is required for any subsequent API
1953 * invocation for that single block cipher.
1954 *
1955 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1956 * of an error, PTR_ERR() returns the error code.
1957 */
1029static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, 1958static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1030 u32 type, u32 mask) 1959 u32 type, u32 mask)
1031{ 1960{
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1041 return &tfm->base; 1970 return &tfm->base;
1042} 1971}
1043 1972
1973/**
1974 * crypto_free_cipher() - zeroize and free the single block cipher handle
1975 * @tfm: cipher handle to be freed
1976 */
1044static inline void crypto_free_cipher(struct crypto_cipher *tfm) 1977static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1045{ 1978{
1046 crypto_free_tfm(crypto_cipher_tfm(tfm)); 1979 crypto_free_tfm(crypto_cipher_tfm(tfm));
1047} 1980}
1048 1981
1982/**
1983 * crypto_has_cipher() - Search for the availability of a single block cipher
1984 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1985 * single block cipher
1986 * @type: specifies the type of the cipher
1987 * @mask: specifies the mask for the cipher
1988 *
1989 * Return: true when the single block cipher is known to the kernel crypto API;
1990 * false otherwise
1991 */
1049static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) 1992static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1050{ 1993{
1051 type &= ~CRYPTO_ALG_TYPE_MASK; 1994 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
1060 return &crypto_cipher_tfm(tfm)->crt_cipher; 2003 return &crypto_cipher_tfm(tfm)->crt_cipher;
1061} 2004}
1062 2005
2006/**
2007 * crypto_cipher_blocksize() - obtain block size for cipher
2008 * @tfm: cipher handle
2009 *
2010 * The block size for the single block cipher referenced with the cipher handle
2011 * tfm is returned. The caller may use that information to allocate appropriate
2012 * memory for the data returned by the encryption or decryption operation
2013 *
2014 * Return: block size of cipher
2015 */
1063static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) 2016static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
1064{ 2017{
1065 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); 2018 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
1087 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); 2040 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
1088} 2041}
1089 2042
2043/**
2044 * crypto_cipher_setkey() - set key for cipher
2045 * @tfm: cipher handle
2046 * @key: buffer holding the key
2047 * @keylen: length of the key in bytes
2048 *
2049 * The caller provided key is set for the single block cipher referenced by the
2050 * cipher handle.
2051 *
2052 * Note, the key length determines the cipher type. Many block ciphers implement
2053 * different cipher modes depending on the key size, such as AES-128 vs AES-192
2054 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
2055 * is performed.
2056 *
2057 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2058 */
1090static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, 2059static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1091 const u8 *key, unsigned int keylen) 2060 const u8 *key, unsigned int keylen)
1092{ 2061{
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1094 key, keylen); 2063 key, keylen);
1095} 2064}
1096 2065
2066/**
2067 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
2068 * @tfm: cipher handle
2069 * @dst: points to the buffer that will be filled with the ciphertext
2070 * @src: buffer holding the plaintext to be encrypted
2071 *
2072 * Invoke the encryption operation of one block. The caller must ensure that
2073 * the plaintext and ciphertext buffers are at least one block in size.
2074 */
1097static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, 2075static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1098 u8 *dst, const u8 *src) 2076 u8 *dst, const u8 *src)
1099{ 2077{
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1101 dst, src); 2079 dst, src);
1102} 2080}
1103 2081
2082/**
2083 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
2084 * @tfm: cipher handle
2085 * @dst: points to the buffer that will be filled with the plaintext
2086 * @src: buffer holding the ciphertext to be decrypted
2087 *
2088 * Invoke the decryption operation of one block. The caller must ensure that
2089 * the plaintext and ciphertext buffers are at least one block in size.
2090 */
1104static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, 2091static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1105 u8 *dst, const u8 *src) 2092 u8 *dst, const u8 *src)
1106{ 2093{
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1108 dst, src); 2095 dst, src);
1109} 2096}
1110 2097
2098/**
2099 * DOC: Synchronous Message Digest API
2100 *
2101 * The synchronous message digest API is used with the ciphers of type
2102 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
2103 */
2104
1111static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) 2105static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
1112{ 2106{
1113 return (struct crypto_hash *)tfm; 2107 return (struct crypto_hash *)tfm;
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
1120 return __crypto_hash_cast(tfm); 2114 return __crypto_hash_cast(tfm);
1121} 2115}
1122 2116
2117/**
2118 * crypto_alloc_hash() - allocate synchronous message digest handle
2119 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2120 * message digest cipher
2121 * @type: specifies the type of the cipher
2122 * @mask: specifies the mask for the cipher
2123 *
2124 * Allocate a cipher handle for a message digest. The returned struct
2125 * crypto_hash is the cipher handle that is required for any subsequent
2126 * API invocation for that message digest.
2127 *
2128 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
2129 * of an error, PTR_ERR() returns the error code.
2130 */
1123static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, 2131static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
1124 u32 type, u32 mask) 2132 u32 type, u32 mask)
1125{ 2133{
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
1136 return &tfm->base; 2144 return &tfm->base;
1137} 2145}
1138 2146
2147/**
2148 * crypto_free_hash() - zeroize and free message digest handle
2149 * @tfm: cipher handle to be freed
2150 */
1139static inline void crypto_free_hash(struct crypto_hash *tfm) 2151static inline void crypto_free_hash(struct crypto_hash *tfm)
1140{ 2152{
1141 crypto_free_tfm(crypto_hash_tfm(tfm)); 2153 crypto_free_tfm(crypto_hash_tfm(tfm));
1142} 2154}
1143 2155
2156/**
2157 * crypto_has_hash() - Search for the availability of a message digest
2158 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2159 * message digest cipher
2160 * @type: specifies the type of the cipher
2161 * @mask: specifies the mask for the cipher
2162 *
2163 * Return: true when the message digest cipher is known to the kernel crypto
2164 * API; false otherwise
2165 */
1144static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) 2166static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
1145{ 2167{
1146 type &= ~CRYPTO_ALG_TYPE_MASK; 2168 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
1156 return &crypto_hash_tfm(tfm)->crt_hash; 2178 return &crypto_hash_tfm(tfm)->crt_hash;
1157} 2179}
1158 2180
2181/**
2182 * crypto_hash_blocksize() - obtain block size for message digest
2183 * @tfm: cipher handle
2184 *
2185 * The block size for the message digest cipher referenced with the cipher
2186 * handle is returned.
2187 *
2188 * Return: block size of cipher
2189 */
1159static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) 2190static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
1160{ 2191{
1161 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); 2192 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
1166 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); 2197 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
1167} 2198}
1168 2199
2200/**
2201 * crypto_hash_digestsize() - obtain message digest size
2202 * @tfm: cipher handle
2203 *
2204 * The size for the message digest created by the message digest cipher
2205 * referenced with the cipher handle is returned.
2206 *
2207 * Return: message digest size
2208 */
1169static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) 2209static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
1170{ 2210{
1171 return crypto_hash_crt(tfm)->digestsize; 2211 return crypto_hash_crt(tfm)->digestsize;
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
1186 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); 2226 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
1187} 2227}
1188 2228
2229/**
2230 * crypto_hash_init() - (re)initialize message digest handle
2231 * @desc: cipher request handle that to be filled by caller --
2232 * desc.tfm is filled with the hash cipher handle;
2233 * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
2234 *
2235 * The call (re-)initializes the message digest referenced by the hash cipher
2236 * request handle. Any potentially existing state created by previous
2237 * operations is discarded.
2238 *
2239 * Return: 0 if the message digest initialization was successful; < 0 if an
2240 * error occurred
2241 */
1189static inline int crypto_hash_init(struct hash_desc *desc) 2242static inline int crypto_hash_init(struct hash_desc *desc)
1190{ 2243{
1191 return crypto_hash_crt(desc->tfm)->init(desc); 2244 return crypto_hash_crt(desc->tfm)->init(desc);
1192} 2245}
1193 2246
2247/**
2248 * crypto_hash_update() - add data to message digest for processing
2249 * @desc: cipher request handle
2250 * @sg: scatter / gather list pointing to the data to be added to the message
2251 * digest
2252 * @nbytes: number of bytes to be processed from @sg
2253 *
2254 * Updates the message digest state of the cipher handle pointed to by the
2255 * hash cipher request handle with the input data pointed to by the
2256 * scatter/gather list.
2257 *
2258 * Return: 0 if the message digest update was successful; < 0 if an error
2259 * occurred
2260 */
1194static inline int crypto_hash_update(struct hash_desc *desc, 2261static inline int crypto_hash_update(struct hash_desc *desc,
1195 struct scatterlist *sg, 2262 struct scatterlist *sg,
1196 unsigned int nbytes) 2263 unsigned int nbytes)
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc,
1198 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); 2265 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
1199} 2266}
1200 2267
2268/**
2269 * crypto_hash_final() - calculate message digest
2270 * @desc: cipher request handle
2271 * @out: message digest output buffer -- The caller must ensure that the out
2272 * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
2273 * function).
2274 *
2275 * Finalize the message digest operation and create the message digest
2276 * based on all data added to the cipher handle. The message digest is placed
2277 * into the output buffer.
2278 *
2279 * Return: 0 if the message digest creation was successful; < 0 if an error
2280 * occurred
2281 */
1201static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) 2282static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
1202{ 2283{
1203 return crypto_hash_crt(desc->tfm)->final(desc, out); 2284 return crypto_hash_crt(desc->tfm)->final(desc, out);
1204} 2285}
1205 2286
2287/**
2288 * crypto_hash_digest() - calculate message digest for a buffer
2289 * @desc: see crypto_hash_final()
2290 * @sg: see crypto_hash_update()
2291 * @nbytes: see crypto_hash_update()
2292 * @out: see crypto_hash_final()
2293 *
2294 * This function is a "short-hand" for the function calls of crypto_hash_init,
2295 * crypto_hash_update and crypto_hash_final. The parameters have the same
2296 * meaning as discussed for those separate three functions.
2297 *
2298 * Return: 0 if the message digest creation was successful; < 0 if an error
2299 * occurred
2300 */
1206static inline int crypto_hash_digest(struct hash_desc *desc, 2301static inline int crypto_hash_digest(struct hash_desc *desc,
1207 struct scatterlist *sg, 2302 struct scatterlist *sg,
1208 unsigned int nbytes, u8 *out) 2303 unsigned int nbytes, u8 *out)
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc,
1210 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); 2305 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
1211} 2306}
1212 2307
2308/**
2309 * crypto_hash_setkey() - set key for message digest
2310 * @hash: cipher handle
2311 * @key: buffer holding the key
2312 * @keylen: length of the key in bytes
2313 *
2314 * The caller provided key is set for the message digest cipher. The cipher
2315 * handle must point to a keyed hash in order for this function to succeed.
2316 *
2317 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2318 */
1213static inline int crypto_hash_setkey(struct crypto_hash *hash, 2319static inline int crypto_hash_setkey(struct crypto_hash *hash,
1214 const u8 *key, unsigned int keylen) 2320 const u8 *key, unsigned int keylen)
1215{ 2321{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b2a2a08523bf..5a813988e6d4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -124,15 +124,15 @@ struct dentry {
124 void *d_fsdata; /* fs-specific data */ 124 void *d_fsdata; /* fs-specific data */
125 125
126 struct list_head d_lru; /* LRU list */ 126 struct list_head d_lru; /* LRU list */
127 struct list_head d_child; /* child of parent list */
128 struct list_head d_subdirs; /* our children */
127 /* 129 /*
128 * d_child and d_rcu can share memory 130 * d_alias and d_rcu can share memory
129 */ 131 */
130 union { 132 union {
131 struct list_head d_child; /* child of parent list */ 133 struct hlist_node d_alias; /* inode alias list */
132 struct rcu_head d_rcu; 134 struct rcu_head d_rcu;
133 } d_u; 135 } d_u;
134 struct list_head d_subdirs; /* our children */
135 struct hlist_node d_alias; /* inode alias list */
136}; 136};
137 137
138/* 138/*
@@ -230,7 +230,6 @@ extern seqlock_t rename_lock;
230 */ 230 */
231extern void d_instantiate(struct dentry *, struct inode *); 231extern void d_instantiate(struct dentry *, struct inode *);
232extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); 232extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
233extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
234extern int d_instantiate_no_diralias(struct dentry *, struct inode *); 233extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
235extern void __d_drop(struct dentry *dentry); 234extern void __d_drop(struct dentry *dentry);
236extern void d_drop(struct dentry *dentry); 235extern void d_drop(struct dentry *dentry);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 4d0b4d1aa132..d84f8c254a87 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -92,8 +92,8 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
92 struct dentry *parent, 92 struct dentry *parent,
93 struct debugfs_regset32 *regset); 93 struct debugfs_regset32 *regset);
94 94
95int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, 95void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
96 int nregs, void __iomem *base, char *prefix); 96 int nregs, void __iomem *base, char *prefix);
97 97
98struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, 98struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
99 struct dentry *parent, 99 struct dentry *parent,
@@ -233,10 +233,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name,
233 return ERR_PTR(-ENODEV); 233 return ERR_PTR(-ENODEV);
234} 234}
235 235
236static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, 236static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
237 int nregs, void __iomem *base, char *prefix) 237 int nregs, void __iomem *base, char *prefix)
238{ 238{
239 return 0;
240} 239}
241 240
242static inline bool debugfs_initialized(void) 241static inline bool debugfs_initialized(void)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e1707de043ae..ca6d2acc5eb7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -64,6 +64,7 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti,
64 union map_info *map_context); 64 union map_info *map_context);
65 65
66typedef void (*dm_presuspend_fn) (struct dm_target *ti); 66typedef void (*dm_presuspend_fn) (struct dm_target *ti);
67typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
67typedef void (*dm_postsuspend_fn) (struct dm_target *ti); 68typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
68typedef int (*dm_preresume_fn) (struct dm_target *ti); 69typedef int (*dm_preresume_fn) (struct dm_target *ti);
69typedef void (*dm_resume_fn) (struct dm_target *ti); 70typedef void (*dm_resume_fn) (struct dm_target *ti);
@@ -145,6 +146,7 @@ struct target_type {
145 dm_endio_fn end_io; 146 dm_endio_fn end_io;
146 dm_request_endio_fn rq_end_io; 147 dm_request_endio_fn rq_end_io;
147 dm_presuspend_fn presuspend; 148 dm_presuspend_fn presuspend;
149 dm_presuspend_undo_fn presuspend_undo;
148 dm_postsuspend_fn postsuspend; 150 dm_postsuspend_fn postsuspend;
149 dm_preresume_fn preresume; 151 dm_preresume_fn preresume;
150 dm_resume_fn resume; 152 dm_resume_fn resume;
diff --git a/include/linux/device.h b/include/linux/device.h
index ce1f21608b16..41d6a7555c6b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev)
911 mutex_unlock(&dev->mutex); 911 mutex_unlock(&dev->mutex);
912} 912}
913 913
914static inline void device_lock_assert(struct device *dev)
915{
916 lockdep_assert_held(&dev->mutex);
917}
918
914void driver_init(void); 919void driver_init(void);
915 920
916/* 921/*
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 653a1fd07ae8..40cd75e21ea2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -447,7 +447,8 @@ struct dmaengine_unmap_data {
447 * communicate status 447 * communicate status
448 * @phys: physical address of the descriptor 448 * @phys: physical address of the descriptor
449 * @chan: target channel for this operation 449 * @chan: target channel for this operation
450 * @tx_submit: set the prepared descriptor(s) to be executed by the engine 450 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
451 * descriptor pending. To be pushed on .issue_pending() call
451 * @callback: routine to call after this operation is complete 452 * @callback: routine to call after this operation is complete
452 * @callback_param: general parameter to pass to the callback routine 453 * @callback_param: general parameter to pass to the callback routine
453 * ---async_tx api specific fields--- 454 * ---async_tx api specific fields---
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 593fff99e6bf..30624954dec5 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -30,6 +30,12 @@
30 30
31struct acpi_dmar_header; 31struct acpi_dmar_header;
32 32
33#ifdef CONFIG_X86
34# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
35#else
36# define DMAR_UNITS_SUPPORTED 64
37#endif
38
33/* DMAR Flags */ 39/* DMAR Flags */
34#define DMAR_INTR_REMAP 0x1 40#define DMAR_INTR_REMAP 0x1
35#define DMAR_X2APIC_OPT_OUT 0x2 41#define DMAR_X2APIC_OPT_OUT 0x2
@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
120/* Intel IOMMU detection */ 126/* Intel IOMMU detection */
121extern int detect_intel_iommu(void); 127extern int detect_intel_iommu(void);
122extern int enable_drhd_fault_handling(void); 128extern int enable_drhd_fault_handling(void);
129extern int dmar_device_add(acpi_handle handle);
130extern int dmar_device_remove(acpi_handle handle);
131
132static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
133{
134 return 0;
135}
123 136
124#ifdef CONFIG_INTEL_IOMMU 137#ifdef CONFIG_INTEL_IOMMU
125extern int iommu_detected, no_iommu; 138extern int iommu_detected, no_iommu;
126extern int intel_iommu_init(void); 139extern int intel_iommu_init(void);
127extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); 140extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
128extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); 141extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
142extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
143extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
144extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
129extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); 145extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
130#else /* !CONFIG_INTEL_IOMMU: */ 146#else /* !CONFIG_INTEL_IOMMU: */
131static inline int intel_iommu_init(void) { return -ENODEV; } 147static inline int intel_iommu_init(void) { return -ENODEV; }
132static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) 148
149#define dmar_parse_one_rmrr dmar_res_noop
150#define dmar_parse_one_atsr dmar_res_noop
151#define dmar_check_one_atsr dmar_res_noop
152#define dmar_release_one_atsr dmar_res_noop
153
154static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
133{ 155{
134 return 0; 156 return 0;
135} 157}
136static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) 158
159static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
137{ 160{
138 return 0; 161 return 0;
139} 162}
140static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) 163#endif /* CONFIG_INTEL_IOMMU */
164
165#ifdef CONFIG_IRQ_REMAP
166extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
167#else /* CONFIG_IRQ_REMAP */
168static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
169{ return 0; }
170#endif /* CONFIG_IRQ_REMAP */
171
172#else /* CONFIG_DMAR_TABLE */
173
174static inline int dmar_device_add(void *handle)
175{
176 return 0;
177}
178
179static inline int dmar_device_remove(void *handle)
141{ 180{
142 return 0; 181 return 0;
143} 182}
144#endif /* CONFIG_INTEL_IOMMU */
145 183
146#endif /* CONFIG_DMAR_TABLE */ 184#endif /* CONFIG_DMAR_TABLE */
147 185
diff --git a/include/linux/edac.h b/include/linux/edac.h
index e1e68da6f35c..da3b72e95db3 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -194,7 +194,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
194 * @MEM_DDR3: DDR3 RAM 194 * @MEM_DDR3: DDR3 RAM
195 * @MEM_RDDR3: Registered DDR3 RAM 195 * @MEM_RDDR3: Registered DDR3 RAM
196 * This is a variant of the DDR3 memories. 196 * This is a variant of the DDR3 memories.
197 * @MEM_DDR4: DDR4 RAM 197 * @MEM_LRDDR3 Load-Reduced DDR3 memory.
198 * @MEM_DDR4: Unbuffered DDR4 RAM
198 * @MEM_RDDR4: Registered DDR4 RAM 199 * @MEM_RDDR4: Registered DDR4 RAM
199 * This is a variant of the DDR4 memories. 200 * This is a variant of the DDR4 memories.
200 */ 201 */
@@ -216,6 +217,7 @@ enum mem_type {
216 MEM_XDR, 217 MEM_XDR,
217 MEM_DDR3, 218 MEM_DDR3,
218 MEM_RDDR3, 219 MEM_RDDR3,
220 MEM_LRDDR3,
219 MEM_DDR4, 221 MEM_DDR4,
220 MEM_RDDR4, 222 MEM_RDDR4,
221}; 223};
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index e50f98b0297a..eb0b1988050a 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
75 const u8 word, u16 *data); 75 const u8 word, u16 *data);
76extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, 76extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
77 const u8 word, __le16 *data, const u16 words); 77 const u8 word, __le16 *data, const u16 words);
78extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom,
79 const u8 byte, u8 *data);
80extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom,
81 const u8 byte, u8 *data, const u16 bytes);
78 82
79extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); 83extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
80 84
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 45cb4ffdea62..0238d612750e 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -92,6 +92,7 @@ typedef struct {
92#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */ 92#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
93#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */ 93#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
94#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */ 94#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
95#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
95#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ 96#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
96#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ 97#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
97#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ 98#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
@@ -502,6 +503,10 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
502typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, 503typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
503 u32 attr, unsigned long data_size, 504 u32 attr, unsigned long data_size,
504 void *data); 505 void *data);
506typedef efi_status_t
507efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
508 u32 attr, unsigned long data_size, void *data);
509
505typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); 510typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
506typedef void efi_reset_system_t (int reset_type, efi_status_t status, 511typedef void efi_reset_system_t (int reset_type, efi_status_t status,
507 unsigned long data_size, efi_char16_t *data); 512 unsigned long data_size, efi_char16_t *data);
@@ -542,6 +547,9 @@ void efi_native_runtime_setup(void);
542#define SMBIOS_TABLE_GUID \ 547#define SMBIOS_TABLE_GUID \
543 EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 548 EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
544 549
550#define SMBIOS3_TABLE_GUID \
551 EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 )
552
545#define SAL_SYSTEM_TABLE_GUID \ 553#define SAL_SYSTEM_TABLE_GUID \
546 EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 554 EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
547 555
@@ -805,7 +813,8 @@ extern struct efi {
805 unsigned long mps; /* MPS table */ 813 unsigned long mps; /* MPS table */
806 unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ 814 unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
807 unsigned long acpi20; /* ACPI table (ACPI 2.0) */ 815 unsigned long acpi20; /* ACPI table (ACPI 2.0) */
808 unsigned long smbios; /* SM BIOS table */ 816 unsigned long smbios; /* SMBIOS table (32 bit entry point) */
817 unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
809 unsigned long sal_systab; /* SAL system table */ 818 unsigned long sal_systab; /* SAL system table */
810 unsigned long boot_info; /* boot info table */ 819 unsigned long boot_info; /* boot info table */
811 unsigned long hcdp; /* HCDP table */ 820 unsigned long hcdp; /* HCDP table */
@@ -821,6 +830,7 @@ extern struct efi {
821 efi_get_variable_t *get_variable; 830 efi_get_variable_t *get_variable;
822 efi_get_next_variable_t *get_next_variable; 831 efi_get_next_variable_t *get_next_variable;
823 efi_set_variable_t *set_variable; 832 efi_set_variable_t *set_variable;
833 efi_set_variable_nonblocking_t *set_variable_nonblocking;
824 efi_query_variable_info_t *query_variable_info; 834 efi_query_variable_info_t *query_variable_info;
825 efi_update_capsule_t *update_capsule; 835 efi_update_capsule_t *update_capsule;
826 efi_query_capsule_caps_t *query_capsule_caps; 836 efi_query_capsule_caps_t *query_capsule_caps;
@@ -886,6 +896,13 @@ extern bool efi_poweroff_required(void);
886 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ 896 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
887 (md) = (void *)(md) + (m)->desc_size) 897 (md) = (void *)(md) + (m)->desc_size)
888 898
899/*
900 * Format an EFI memory descriptor's type and attributes to a user-provided
901 * character buffer, as per snprintf(), and return the buffer.
902 */
903char * __init efi_md_typeattr_format(char *buf, size_t size,
904 const efi_memory_desc_t *md);
905
889/** 906/**
890 * efi_range_is_wc - check the WC bit on an address range 907 * efi_range_is_wc - check the WC bit on an address range
891 * @start: starting kvirt address 908 * @start: starting kvirt address
@@ -1034,6 +1051,7 @@ struct efivar_operations {
1034 efi_get_variable_t *get_variable; 1051 efi_get_variable_t *get_variable;
1035 efi_get_next_variable_t *get_next_variable; 1052 efi_get_next_variable_t *get_next_variable;
1036 efi_set_variable_t *set_variable; 1053 efi_set_variable_t *set_variable;
1054 efi_set_variable_nonblocking_t *set_variable_nonblocking;
1037 efi_query_variable_store_t *query_variable_store; 1055 efi_query_variable_store_t *query_variable_store;
1038}; 1056};
1039 1057
@@ -1227,4 +1245,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
1227 unsigned long *load_addr, 1245 unsigned long *load_addr,
1228 unsigned long *load_size); 1246 unsigned long *load_size);
1229 1247
1248efi_status_t efi_parse_options(char *cmdline);
1249
1250bool efi_runtime_disabled(void);
1230#endif /* _LINUX_EFI_H */ 1251#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 67a5fa7830c4..20fa8d8ae313 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -15,6 +15,11 @@
15 set_personality(PER_LINUX | (current->personality & (~PER_MASK))) 15 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
16#endif 16#endif
17 17
18#ifndef SET_PERSONALITY2
19#define SET_PERSONALITY2(ex, state) \
20 SET_PERSONALITY(ex)
21#endif
22
18#if ELF_CLASS == ELFCLASS32 23#if ELF_CLASS == ELFCLASS32
19 24
20extern Elf32_Dyn _DYNAMIC []; 25extern Elf32_Dyn _DYNAMIC [];
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 733980fce8e3..41c891d05f04 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
392#endif 392#endif
393} 393}
394 394
395/**
396 * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
397 * @skb: Buffer to pad
398 *
399 * An Ethernet frame should have a minimum size of 60 bytes. This function
400 * takes short frames and pads them with zeros up to the 60 byte limit.
401 */
402static inline int eth_skb_pad(struct sk_buff *skb)
403{
404 return skb_put_padto(skb, ETH_ZLEN);
405}
406
395#endif /* _LINUX_ETHERDEVICE_H */ 407#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c1a2d60dfb82..653dc9c4ebac 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -59,6 +59,26 @@ enum ethtool_phys_id_state {
59 ETHTOOL_ID_OFF 59 ETHTOOL_ID_OFF
60}; 60};
61 61
62enum {
63 ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
64 ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
65
66 /*
67 * Add your fresh new hash function bits above and remember to update
68 * rss_hash_func_strings[] in ethtool.c
69 */
70 ETH_RSS_HASH_FUNCS_COUNT
71};
72
73#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
74#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
75
76#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
77#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
78
79#define ETH_RSS_HASH_UNKNOWN 0
80#define ETH_RSS_HASH_NO_CHANGE 0
81
62struct net_device; 82struct net_device;
63 83
64/* Some generic methods drivers may use in their ethtool_ops */ 84/* Some generic methods drivers may use in their ethtool_ops */
@@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
158 * Returns zero if not supported for this specific device. 178 * Returns zero if not supported for this specific device.
159 * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. 179 * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
160 * Returns zero if not supported for this specific device. 180 * Returns zero if not supported for this specific device.
161 * @get_rxfh: Get the contents of the RX flow hash indirection table and hash 181 * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key
162 * key. 182 * and/or hash function.
163 * Will only be called if one or both of @get_rxfh_indir_size and
164 * @get_rxfh_key_size are implemented and return non-zero.
165 * Returns a negative error code or zero.
166 * @set_rxfh: Set the contents of the RX flow hash indirection table and/or
167 * hash key. In case only the indirection table or hash key is to be
168 * changed, the other argument will be %NULL.
169 * Will only be called if one or both of @get_rxfh_indir_size and
170 * @get_rxfh_key_size are implemented and return non-zero.
171 * Returns a negative error code or zero. 183 * Returns a negative error code or zero.
184 * @set_rxfh: Set the contents of the RX flow hash indirection table, hash
185 * key, and/or hash function. Arguments which are set to %NULL or zero
186 * will remain unchanged.
187 * Returns a negative error code or zero. An error code must be returned
188 * if at least one unsupported change was requested.
172 * @get_channels: Get number of channels. 189 * @get_channels: Get number of channels.
173 * @set_channels: Set number of channels. Returns a negative error code or 190 * @set_channels: Set number of channels. Returns a negative error code or
174 * zero. 191 * zero.
@@ -241,9 +258,10 @@ struct ethtool_ops {
241 int (*reset)(struct net_device *, u32 *); 258 int (*reset)(struct net_device *, u32 *);
242 u32 (*get_rxfh_key_size)(struct net_device *); 259 u32 (*get_rxfh_key_size)(struct net_device *);
243 u32 (*get_rxfh_indir_size)(struct net_device *); 260 u32 (*get_rxfh_indir_size)(struct net_device *);
244 int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); 261 int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
262 u8 *hfunc);
245 int (*set_rxfh)(struct net_device *, const u32 *indir, 263 int (*set_rxfh)(struct net_device *, const u32 *indir,
246 const u8 *key); 264 const u8 *key, const u8 hfunc);
247 void (*get_channels)(struct net_device *, struct ethtool_channels *); 265 void (*get_channels)(struct net_device *, struct ethtool_channels *);
248 int (*set_channels)(struct net_device *, struct ethtool_channels *); 266 int (*set_channels)(struct net_device *, struct ethtool_channels *);
249 int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); 267 int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 860313a33a43..87f14e90e984 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -33,7 +33,8 @@
33#define F2FS_META_INO(sbi) (sbi->meta_ino_num) 33#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
34 34
35/* This flag is used by node and meta inodes, and by recovery */ 35/* This flag is used by node and meta inodes, and by recovery */
36#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) 36#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
37#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
37 38
38/* 39/*
39 * For further optimization on multi-head logs, on-disk layout supports maximum 40 * For further optimization on multi-head logs, on-disk layout supports maximum
@@ -170,14 +171,12 @@ struct f2fs_extent {
170 171
171#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ 172#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */
172#define F2FS_INLINE_DATA 0x02 /* file inline data flag */ 173#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
174#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
175#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
173 176
174#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ 177#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
175 F2FS_INLINE_XATTR_ADDRS - 1)) 178 F2FS_INLINE_XATTR_ADDRS - 1))
176 179
177#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
178 sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
179 DEF_NIDS_PER_INODE - 1))
180
181struct f2fs_inode { 180struct f2fs_inode {
182 __le16 i_mode; /* file mode */ 181 __le16 i_mode; /* file mode */
183 __u8 i_advise; /* file hints */ 182 __u8 i_advise; /* file hints */
@@ -435,6 +434,24 @@ struct f2fs_dentry_block {
435 __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; 434 __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
436} __packed; 435} __packed;
437 436
437/* for inline dir */
438#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
439 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
440 BITS_PER_BYTE + 1))
441#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
442 BITS_PER_BYTE - 1) / BITS_PER_BYTE)
443#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
444 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
445 NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
446
447/* inline directory entry structure */
448struct f2fs_inline_dentry {
449 __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
450 __u8 reserved[INLINE_RESERVED_SIZE];
451 struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
452 __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
453} __packed;
454
438/* file types used in inode_info->flags */ 455/* file types used in inode_info->flags */
439enum { 456enum {
440 F2FS_FT_UNKNOWN, 457 F2FS_FT_UNKNOWN,
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index c6f996f2abb6..798fad9e420d 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -5,6 +5,7 @@
5 5
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/ratelimit.h>
8#include <linux/atomic.h> 9#include <linux/atomic.h>
9 10
10/* 11/*
@@ -25,14 +26,18 @@ struct fault_attr {
25 unsigned long reject_end; 26 unsigned long reject_end;
26 27
27 unsigned long count; 28 unsigned long count;
29 struct ratelimit_state ratelimit_state;
30 struct dentry *dname;
28}; 31};
29 32
30#define FAULT_ATTR_INITIALIZER { \ 33#define FAULT_ATTR_INITIALIZER { \
31 .interval = 1, \ 34 .interval = 1, \
32 .times = ATOMIC_INIT(1), \ 35 .times = ATOMIC_INIT(1), \
33 .require_end = ULONG_MAX, \ 36 .require_end = ULONG_MAX, \
34 .stacktrace_depth = 32, \ 37 .stacktrace_depth = 32, \
35 .verbose = 2, \ 38 .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
39 .verbose = 2, \
40 .dname = NULL, \
36 } 41 }
37 42
38#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER 43#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
diff --git a/include/linux/fence.h b/include/linux/fence.h
index d174585b874b..39efee130d2b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -128,8 +128,8 @@ struct fence_cb {
128 * from irq context, so normal spinlocks can be used. 128 * from irq context, so normal spinlocks can be used.
129 * 129 *
130 * A return value of false indicates the fence already passed, 130 * A return value of false indicates the fence already passed,
131 * or some failure occured that made it impossible to enable 131 * or some failure occurred that made it impossible to enable
132 * signaling. True indicates succesful enabling. 132 * signaling. True indicates successful enabling.
133 * 133 *
134 * fence->status may be set in enable_signaling, but only when false is 134 * fence->status may be set in enable_signaling, but only when false is
135 * returned. 135 * returned.
diff --git a/include/linux/file.h b/include/linux/file.h
index 4d69123377a2..f87d30882a24 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag);
66extern bool get_close_on_exec(unsigned int fd); 66extern bool get_close_on_exec(unsigned int fd);
67extern void put_filp(struct file *); 67extern void put_filp(struct file *);
68extern int get_unused_fd_flags(unsigned flags); 68extern int get_unused_fd_flags(unsigned flags);
69#define get_unused_fd() get_unused_fd_flags(0)
70extern void put_unused_fd(unsigned int fd); 69extern void put_unused_fd(unsigned int fd);
71 70
72extern void fd_install(unsigned int fd, struct file *file); 71extern void fd_install(unsigned int fd, struct file *file);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ca95abd2bed1..caac2087a4d5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
381void bpf_prog_destroy(struct bpf_prog *fp); 381void bpf_prog_destroy(struct bpf_prog *fp);
382 382
383int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 383int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
384int sk_attach_bpf(u32 ufd, struct sock *sk);
384int sk_detach_filter(struct sock *sk); 385int sk_detach_filter(struct sock *sk);
385 386
386int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); 387int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 7fd81b8c4897..6b7fd9cf5ea2 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -246,15 +246,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
246 * defined in <linux/wait.h> 246 * defined in <linux/wait.h>
247 */ 247 */
248 248
249#define wait_event_freezekillable(wq, condition) \
250({ \
251 int __retval; \
252 freezer_do_not_count(); \
253 __retval = wait_event_killable(wq, (condition)); \
254 freezer_count(); \
255 __retval; \
256})
257
258/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ 249/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
259#define wait_event_freezekillable_unsafe(wq, condition) \ 250#define wait_event_freezekillable_unsafe(wq, condition) \
260({ \ 251({ \
@@ -265,35 +256,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
265 __retval; \ 256 __retval; \
266}) 257})
267 258
268#define wait_event_freezable(wq, condition) \
269({ \
270 int __retval; \
271 freezer_do_not_count(); \
272 __retval = wait_event_interruptible(wq, (condition)); \
273 freezer_count(); \
274 __retval; \
275})
276
277#define wait_event_freezable_timeout(wq, condition, timeout) \
278({ \
279 long __retval = timeout; \
280 freezer_do_not_count(); \
281 __retval = wait_event_interruptible_timeout(wq, (condition), \
282 __retval); \
283 freezer_count(); \
284 __retval; \
285})
286
287#define wait_event_freezable_exclusive(wq, condition) \
288({ \
289 int __retval; \
290 freezer_do_not_count(); \
291 __retval = wait_event_interruptible_exclusive(wq, condition); \
292 freezer_count(); \
293 __retval; \
294})
295
296
297#else /* !CONFIG_FREEZER */ 259#else /* !CONFIG_FREEZER */
298static inline bool frozen(struct task_struct *p) { return false; } 260static inline bool frozen(struct task_struct *p) { return false; }
299static inline bool freezing(struct task_struct *p) { return false; } 261static inline bool freezing(struct task_struct *p) { return false; }
@@ -331,18 +293,6 @@ static inline void set_freezable(void) {}
331#define freezable_schedule_hrtimeout_range(expires, delta, mode) \ 293#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
332 schedule_hrtimeout_range(expires, delta, mode) 294 schedule_hrtimeout_range(expires, delta, mode)
333 295
334#define wait_event_freezable(wq, condition) \
335 wait_event_interruptible(wq, condition)
336
337#define wait_event_freezable_timeout(wq, condition, timeout) \
338 wait_event_interruptible_timeout(wq, condition, timeout)
339
340#define wait_event_freezable_exclusive(wq, condition) \
341 wait_event_interruptible_exclusive(wq, condition)
342
343#define wait_event_freezekillable(wq, condition) \
344 wait_event_killable(wq, condition)
345
346#define wait_event_freezekillable_unsafe(wq, condition) \ 296#define wait_event_freezekillable_unsafe(wq, condition) \
347 wait_event_killable(wq, condition) 297 wait_event_killable(wq, condition)
348 298
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a957d4366c24..4193a0bd99b0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -18,6 +18,7 @@
18#include <linux/pid.h> 18#include <linux/pid.h>
19#include <linux/bug.h> 19#include <linux/bug.h>
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/rwsem.h>
21#include <linux/capability.h> 22#include <linux/capability.h>
22#include <linux/semaphore.h> 23#include <linux/semaphore.h>
23#include <linux/fiemap.h> 24#include <linux/fiemap.h>
@@ -223,6 +224,13 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
223#define ATTR_TIMES_SET (1 << 16) 224#define ATTR_TIMES_SET (1 << 16)
224 225
225/* 226/*
227 * Whiteout is represented by a char device. The following constants define the
228 * mode and device number to use.
229 */
230#define WHITEOUT_MODE 0
231#define WHITEOUT_DEV 0
232
233/*
226 * This is the Inode Attributes structure, used for notify_change(). It 234 * This is the Inode Attributes structure, used for notify_change(). It
227 * uses the above definitions as flags, to know which values have changed. 235 * uses the above definitions as flags, to know which values have changed.
228 * Also, in this manner, a Filesystem can look at only the values it cares 236 * Also, in this manner, a Filesystem can look at only the values it cares
@@ -254,6 +262,12 @@ struct iattr {
254 */ 262 */
255#include <linux/quota.h> 263#include <linux/quota.h>
256 264
265/*
266 * Maximum number of layers of fs stack. Needs to be limited to
267 * prevent kernel stack overflow
268 */
269#define FILESYSTEM_MAX_STACK_DEPTH 2
270
257/** 271/**
258 * enum positive_aop_returns - aop return codes with specific semantics 272 * enum positive_aop_returns - aop return codes with specific semantics
259 * 273 *
@@ -388,7 +402,7 @@ struct address_space {
388 atomic_t i_mmap_writable;/* count VM_SHARED mappings */ 402 atomic_t i_mmap_writable;/* count VM_SHARED mappings */
389 struct rb_root i_mmap; /* tree of private and shared mappings */ 403 struct rb_root i_mmap; /* tree of private and shared mappings */
390 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ 404 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
391 struct mutex i_mmap_mutex; /* protect tree, count, list */ 405 struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
392 /* Protected by tree_lock together with the radix tree */ 406 /* Protected by tree_lock together with the radix tree */
393 unsigned long nrpages; /* number of total pages */ 407 unsigned long nrpages; /* number of total pages */
394 unsigned long nrshadows; /* number of shadow entries */ 408 unsigned long nrshadows; /* number of shadow entries */
@@ -454,6 +468,26 @@ struct block_device {
454 468
455int mapping_tagged(struct address_space *mapping, int tag); 469int mapping_tagged(struct address_space *mapping, int tag);
456 470
471static inline void i_mmap_lock_write(struct address_space *mapping)
472{
473 down_write(&mapping->i_mmap_rwsem);
474}
475
476static inline void i_mmap_unlock_write(struct address_space *mapping)
477{
478 up_write(&mapping->i_mmap_rwsem);
479}
480
481static inline void i_mmap_lock_read(struct address_space *mapping)
482{
483 down_read(&mapping->i_mmap_rwsem);
484}
485
486static inline void i_mmap_unlock_read(struct address_space *mapping)
487{
488 up_read(&mapping->i_mmap_rwsem);
489}
490
457/* 491/*
458 * Might pages of this file be mapped into userspace? 492 * Might pages of this file be mapped into userspace?
459 */ 493 */
@@ -593,9 +627,6 @@ struct inode {
593 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 627 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
594 struct file_lock *i_flock; 628 struct file_lock *i_flock;
595 struct address_space i_data; 629 struct address_space i_data;
596#ifdef CONFIG_QUOTA
597 struct dquot *i_dquot[MAXQUOTAS];
598#endif
599 struct list_head i_devices; 630 struct list_head i_devices;
600 union { 631 union {
601 struct pipe_inode_info *i_pipe; 632 struct pipe_inode_info *i_pipe;
@@ -626,11 +657,13 @@ static inline int inode_unhashed(struct inode *inode)
626 * 2: child/target 657 * 2: child/target
627 * 3: xattr 658 * 3: xattr
628 * 4: second non-directory 659 * 4: second non-directory
629 * The last is for certain operations (such as rename) which lock two 660 * 5: second parent (when locking independent directories in rename)
661 *
662 * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
630 * non-directories at once. 663 * non-directories at once.
631 * 664 *
632 * The locking order between these classes is 665 * The locking order between these classes is
633 * parent -> child -> normal -> xattr -> second non-directory 666 * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
634 */ 667 */
635enum inode_i_mutex_lock_class 668enum inode_i_mutex_lock_class
636{ 669{
@@ -638,7 +671,8 @@ enum inode_i_mutex_lock_class
638 I_MUTEX_PARENT, 671 I_MUTEX_PARENT,
639 I_MUTEX_CHILD, 672 I_MUTEX_CHILD,
640 I_MUTEX_XATTR, 673 I_MUTEX_XATTR,
641 I_MUTEX_NONDIR2 674 I_MUTEX_NONDIR2,
675 I_MUTEX_PARENT2,
642}; 676};
643 677
644void lock_two_nondirectories(struct inode *, struct inode*); 678void lock_two_nondirectories(struct inode *, struct inode*);
@@ -773,7 +807,6 @@ struct file {
773 struct rcu_head fu_rcuhead; 807 struct rcu_head fu_rcuhead;
774 } f_u; 808 } f_u;
775 struct path f_path; 809 struct path f_path;
776#define f_dentry f_path.dentry
777 struct inode *f_inode; /* cached value */ 810 struct inode *f_inode; /* cached value */
778 const struct file_operations *f_op; 811 const struct file_operations *f_op;
779 812
@@ -1208,6 +1241,7 @@ struct super_block {
1208 struct backing_dev_info *s_bdi; 1241 struct backing_dev_info *s_bdi;
1209 struct mtd_info *s_mtd; 1242 struct mtd_info *s_mtd;
1210 struct hlist_node s_instances; 1243 struct hlist_node s_instances;
1244 unsigned int s_quota_types; /* Bitmask of supported quota types */
1211 struct quota_info s_dquot; /* Diskquota specific options */ 1245 struct quota_info s_dquot; /* Diskquota specific options */
1212 1246
1213 struct sb_writers s_writers; 1247 struct sb_writers s_writers;
@@ -1266,6 +1300,11 @@ struct super_block {
1266 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1300 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
1267 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1301 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
1268 struct rcu_head rcu; 1302 struct rcu_head rcu;
1303
1304 /*
1305 * Indicates how deep in a filesystem stack this SB is
1306 */
1307 int s_stack_depth;
1269}; 1308};
1270 1309
1271extern struct timespec current_fs_time(struct super_block *sb); 1310extern struct timespec current_fs_time(struct super_block *sb);
@@ -1398,6 +1437,7 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino
1398extern int vfs_rmdir(struct inode *, struct dentry *); 1437extern int vfs_rmdir(struct inode *, struct dentry *);
1399extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); 1438extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
1400extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); 1439extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
1440extern int vfs_whiteout(struct inode *, struct dentry *);
1401 1441
1402/* 1442/*
1403 * VFS dentry helper functions. 1443 * VFS dentry helper functions.
@@ -1445,7 +1485,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
1445 * This allows the kernel to read directories into kernel space or 1485 * This allows the kernel to read directories into kernel space or
1446 * to have different dirent layouts depending on the binary type. 1486 * to have different dirent layouts depending on the binary type.
1447 */ 1487 */
1448typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); 1488struct dir_context;
1489typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
1490 unsigned);
1491
1449struct dir_context { 1492struct dir_context {
1450 const filldir_t actor; 1493 const filldir_t actor;
1451 loff_t pos; 1494 loff_t pos;
@@ -1491,7 +1534,7 @@ struct file_operations {
1491 int (*setlease)(struct file *, long, struct file_lock **, void **); 1534 int (*setlease)(struct file *, long, struct file_lock **, void **);
1492 long (*fallocate)(struct file *file, int mode, loff_t offset, 1535 long (*fallocate)(struct file *file, int mode, loff_t offset,
1493 loff_t len); 1536 loff_t len);
1494 int (*show_fdinfo)(struct seq_file *m, struct file *f); 1537 void (*show_fdinfo)(struct seq_file *m, struct file *f);
1495}; 1538};
1496 1539
1497struct inode_operations { 1540struct inode_operations {
@@ -1528,6 +1571,9 @@ struct inode_operations {
1528 umode_t create_mode, int *opened); 1571 umode_t create_mode, int *opened);
1529 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 1572 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
1530 int (*set_acl)(struct inode *, struct posix_acl *, int); 1573 int (*set_acl)(struct inode *, struct posix_acl *, int);
1574
1575 /* WARNING: probably going away soon, do not use! */
1576 int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
1531} ____cacheline_aligned; 1577} ____cacheline_aligned;
1532 1578
1533ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, 1579ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1552,7 +1598,9 @@ struct super_operations {
1552 void (*evict_inode) (struct inode *); 1598 void (*evict_inode) (struct inode *);
1553 void (*put_super) (struct super_block *); 1599 void (*put_super) (struct super_block *);
1554 int (*sync_fs)(struct super_block *sb, int wait); 1600 int (*sync_fs)(struct super_block *sb, int wait);
1601 int (*freeze_super) (struct super_block *);
1555 int (*freeze_fs) (struct super_block *); 1602 int (*freeze_fs) (struct super_block *);
1603 int (*thaw_super) (struct super_block *);
1556 int (*unfreeze_fs) (struct super_block *); 1604 int (*unfreeze_fs) (struct super_block *);
1557 int (*statfs) (struct dentry *, struct kstatfs *); 1605 int (*statfs) (struct dentry *, struct kstatfs *);
1558 int (*remount_fs) (struct super_block *, int *, char *); 1606 int (*remount_fs) (struct super_block *, int *, char *);
@@ -1565,6 +1613,7 @@ struct super_operations {
1565#ifdef CONFIG_QUOTA 1613#ifdef CONFIG_QUOTA
1566 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); 1614 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
1567 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 1615 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
1616 struct dquot **(*get_dquots)(struct inode *);
1568#endif 1617#endif
1569 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); 1618 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
1570 long (*nr_cached_objects)(struct super_block *, int); 1619 long (*nr_cached_objects)(struct super_block *, int);
@@ -1625,6 +1674,9 @@ struct super_operations {
1625#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) 1674#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
1626#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) 1675#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
1627 1676
1677#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
1678 (inode)->i_rdev == WHITEOUT_DEV)
1679
1628/* 1680/*
1629 * Inode state bits. Protected by inode->i_lock 1681 * Inode state bits. Protected by inode->i_lock
1630 * 1682 *
@@ -2040,9 +2092,11 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
2040extern struct file *filp_open(const char *, int, umode_t); 2092extern struct file *filp_open(const char *, int, umode_t);
2041extern struct file *file_open_root(struct dentry *, struct vfsmount *, 2093extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2042 const char *, int); 2094 const char *, int);
2095extern int vfs_open(const struct path *, struct file *, const struct cred *);
2043extern struct file * dentry_open(const struct path *, int, const struct cred *); 2096extern struct file * dentry_open(const struct path *, int, const struct cred *);
2044extern int filp_close(struct file *, fl_owner_t id); 2097extern int filp_close(struct file *, fl_owner_t id);
2045 2098
2099extern struct filename *getname_flags(const char __user *, int, int *);
2046extern struct filename *getname(const char __user *); 2100extern struct filename *getname(const char __user *);
2047extern struct filename *getname_kernel(const char *); 2101extern struct filename *getname_kernel(const char *);
2048 2102
@@ -2253,7 +2307,9 @@ extern sector_t bmap(struct inode *, sector_t);
2253#endif 2307#endif
2254extern int notify_change(struct dentry *, struct iattr *, struct inode **); 2308extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2255extern int inode_permission(struct inode *, int); 2309extern int inode_permission(struct inode *, int);
2310extern int __inode_permission(struct inode *, int);
2256extern int generic_permission(struct inode *, int); 2311extern int generic_permission(struct inode *, int);
2312extern int __check_sticky(struct inode *dir, struct inode *inode);
2257 2313
2258static inline bool execute_ok(struct inode *inode) 2314static inline bool execute_ok(struct inode *inode)
2259{ 2315{
@@ -2438,6 +2494,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
2438extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); 2494extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2439 2495
2440/* fs/block_dev.c */ 2496/* fs/block_dev.c */
2497extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
2441extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); 2498extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
2442extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, 2499extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
2443 int datasync); 2500 int datasync);
@@ -2452,6 +2509,9 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
2452 struct file *, loff_t *, size_t, unsigned int); 2509 struct file *, loff_t *, size_t, unsigned int);
2453extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2510extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2454 struct file *out, loff_t *, size_t len, unsigned int flags); 2511 struct file *out, loff_t *, size_t len, unsigned int flags);
2512extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
2513 loff_t *opos, size_t len, unsigned int flags);
2514
2455 2515
2456extern void 2516extern void
2457file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 2517file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
@@ -2737,12 +2797,25 @@ static inline int is_sxid(umode_t mode)
2737 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); 2797 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
2738} 2798}
2739 2799
2800static inline int check_sticky(struct inode *dir, struct inode *inode)
2801{
2802 if (!(dir->i_mode & S_ISVTX))
2803 return 0;
2804
2805 return __check_sticky(dir, inode);
2806}
2807
2740static inline void inode_has_no_xattr(struct inode *inode) 2808static inline void inode_has_no_xattr(struct inode *inode)
2741{ 2809{
2742 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) 2810 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
2743 inode->i_flags |= S_NOSEC; 2811 inode->i_flags |= S_NOSEC;
2744} 2812}
2745 2813
2814static inline bool is_root_inode(struct inode *inode)
2815{
2816 return inode == inode->i_sb->s_root->d_inode;
2817}
2818
2746static inline bool dir_emit(struct dir_context *ctx, 2819static inline bool dir_emit(struct dir_context *ctx,
2747 const char *name, int namelen, 2820 const char *name, int namelen,
2748 u64 ino, unsigned type) 2821 u64 ino, unsigned type)
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index ca060d7c4fa6..0f313f93c586 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -197,24 +197,6 @@ struct fsnotify_group {
197#define FSNOTIFY_EVENT_INODE 2 197#define FSNOTIFY_EVENT_INODE 2
198 198
199/* 199/*
200 * Inode specific fields in an fsnotify_mark
201 */
202struct fsnotify_inode_mark {
203 struct inode *inode; /* inode this mark is associated with */
204 struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */
205 struct list_head free_i_list; /* tmp list used when freeing this mark */
206};
207
208/*
209 * Mount point specific fields in an fsnotify_mark
210 */
211struct fsnotify_vfsmount_mark {
212 struct vfsmount *mnt; /* vfsmount this mark is associated with */
213 struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */
214 struct list_head free_m_list; /* tmp list used when freeing this mark */
215};
216
217/*
218 * a mark is simply an object attached to an in core inode which allows an 200 * a mark is simply an object attached to an in core inode which allows an
219 * fsnotify listener to indicate they are either no longer interested in events 201 * fsnotify listener to indicate they are either no longer interested in events
220 * of a type matching mask or only interested in those events. 202 * of a type matching mask or only interested in those events.
@@ -230,11 +212,17 @@ struct fsnotify_mark {
230 * in kernel that found and may be using this mark. */ 212 * in kernel that found and may be using this mark. */
231 atomic_t refcnt; /* active things looking at this mark */ 213 atomic_t refcnt; /* active things looking at this mark */
232 struct fsnotify_group *group; /* group this mark is for */ 214 struct fsnotify_group *group; /* group this mark is for */
233 struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ 215 struct list_head g_list; /* list of marks by group->i_fsnotify_marks
216 * Also reused for queueing mark into
217 * destroy_list when it's waiting for
218 * the end of SRCU period before it can
219 * be freed */
234 spinlock_t lock; /* protect group and inode */ 220 spinlock_t lock; /* protect group and inode */
221 struct hlist_node obj_list; /* list of marks for inode / vfsmount */
222 struct list_head free_list; /* tmp list used when freeing this mark */
235 union { 223 union {
236 struct fsnotify_inode_mark i; 224 struct inode *inode; /* inode this mark is associated with */
237 struct fsnotify_vfsmount_mark m; 225 struct vfsmount *mnt; /* vfsmount this mark is associated with */
238 }; 226 };
239 __u32 ignored_mask; /* events types to ignore */ 227 __u32 ignored_mask; /* events types to ignore */
240#define FSNOTIFY_MARK_FLAG_INODE 0x01 228#define FSNOTIFY_MARK_FLAG_INODE 0x01
@@ -243,7 +231,6 @@ struct fsnotify_mark {
243#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 231#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
244#define FSNOTIFY_MARK_FLAG_ALIVE 0x10 232#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
245 unsigned int flags; /* vfsmount or inode mark? */ 233 unsigned int flags; /* vfsmount or inode mark? */
246 struct list_head destroy_list;
247 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ 234 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
248}; 235};
249 236
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7b2616fa2472..ed501953f0b2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -61,6 +61,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
61/* 61/*
62 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are 62 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
63 * set in the flags member. 63 * set in the flags member.
64 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
65 * IPMODIFY are a kind of attribute flags which can be set only before
66 * registering the ftrace_ops, and can not be modified while registered.
 67 * Changing those attribute flags after registering ftrace_ops will
68 * cause unexpected results.
64 * 69 *
65 * ENABLED - set/unset when ftrace_ops is registered/unregistered 70 * ENABLED - set/unset when ftrace_ops is registered/unregistered
66 * DYNAMIC - set when ftrace_ops is registered to denote dynamically 71 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
@@ -101,6 +106,10 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
101 * The ftrace_ops trampoline can be set by the ftrace users, and 106 * The ftrace_ops trampoline can be set by the ftrace users, and
102 * in such cases the arch must not modify it. Only the arch ftrace 107 * in such cases the arch must not modify it. Only the arch ftrace
103 * core code should set this flag. 108 * core code should set this flag.
109 * IPMODIFY - The ops can modify the IP register. This can only be set with
110 * SAVE_REGS. If another ops with this flag set is already registered
111 * for any of the functions that this ops will be registered for, then
112 * this ops will fail to register or set_filter_ip.
104 */ 113 */
105enum { 114enum {
106 FTRACE_OPS_FL_ENABLED = 1 << 0, 115 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -116,6 +125,7 @@ enum {
116 FTRACE_OPS_FL_REMOVING = 1 << 10, 125 FTRACE_OPS_FL_REMOVING = 1 << 10,
117 FTRACE_OPS_FL_MODIFYING = 1 << 11, 126 FTRACE_OPS_FL_MODIFYING = 1 << 11,
118 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 127 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
128 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
119}; 129};
120 130
121#ifdef CONFIG_DYNAMIC_FTRACE 131#ifdef CONFIG_DYNAMIC_FTRACE
@@ -310,6 +320,7 @@ bool is_ftrace_trampoline(unsigned long addr);
310 * ENABLED - the function is being traced 320 * ENABLED - the function is being traced
311 * REGS - the record wants the function to save regs 321 * REGS - the record wants the function to save regs
312 * REGS_EN - the function is set up to save regs. 322 * REGS_EN - the function is set up to save regs.
323 * IPMODIFY - the record allows for the IP address to be changed.
313 * 324 *
314 * When a new ftrace_ops is registered and wants a function to save 325 * When a new ftrace_ops is registered and wants a function to save
315 * pt_regs, the rec->flag REGS is set. When the function has been 326 * pt_regs, the rec->flag REGS is set. When the function has been
@@ -323,10 +334,11 @@ enum {
323 FTRACE_FL_REGS_EN = (1UL << 29), 334 FTRACE_FL_REGS_EN = (1UL << 29),
324 FTRACE_FL_TRAMP = (1UL << 28), 335 FTRACE_FL_TRAMP = (1UL << 28),
325 FTRACE_FL_TRAMP_EN = (1UL << 27), 336 FTRACE_FL_TRAMP_EN = (1UL << 27),
337 FTRACE_FL_IPMODIFY = (1UL << 26),
326}; 338};
327 339
328#define FTRACE_REF_MAX_SHIFT 27 340#define FTRACE_REF_MAX_SHIFT 26
329#define FTRACE_FL_BITS 5 341#define FTRACE_FL_BITS 6
330#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) 342#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
331#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) 343#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
332#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) 344#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 41b30fd4d041..b840e3b2770d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -110,11 +110,8 @@ struct vm_area_struct;
110#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ 110#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
111 __GFP_RECLAIMABLE) 111 __GFP_RECLAIMABLE)
112#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 112#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
113#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ 113#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
114 __GFP_HIGHMEM) 114#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
115#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
116 __GFP_HARDWALL | __GFP_HIGHMEM | \
117 __GFP_MOVABLE)
118#define GFP_IOFS (__GFP_IO | __GFP_FS) 115#define GFP_IOFS (__GFP_IO | __GFP_FS)
119#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ 116#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
120 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ 117 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
@@ -381,8 +378,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order);
381 378
382void page_alloc_init(void); 379void page_alloc_init(void);
383void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); 380void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
384void drain_all_pages(void); 381void drain_all_pages(struct zone *zone);
385void drain_local_pages(void *dummy); 382void drain_local_pages(struct zone *zone);
386 383
387/* 384/*
388 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what 385 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 12f146fa6604..00b1b70d68ba 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -94,6 +94,13 @@ int gpiod_to_irq(const struct gpio_desc *desc);
94struct gpio_desc *gpio_to_desc(unsigned gpio); 94struct gpio_desc *gpio_to_desc(unsigned gpio);
95int desc_to_gpio(const struct gpio_desc *desc); 95int desc_to_gpio(const struct gpio_desc *desc);
96 96
97/* Child properties interface */
98struct fwnode_handle;
99
100struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
101 const char *propname);
102struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
103 struct fwnode_handle *child);
97#else /* CONFIG_GPIOLIB */ 104#else /* CONFIG_GPIOLIB */
98 105
99static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, 106static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 8b622468952c..ee2d8c6f9130 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -2,6 +2,7 @@
2#define _GPIO_KEYS_H 2#define _GPIO_KEYS_H
3 3
4struct device; 4struct device;
5struct gpio_desc;
5 6
6/** 7/**
7 * struct gpio_keys_button - configuration parameters 8 * struct gpio_keys_button - configuration parameters
@@ -17,6 +18,7 @@ struct device;
17 * disable button via sysfs 18 * disable button via sysfs
18 * @value: axis value for %EV_ABS 19 * @value: axis value for %EV_ABS
19 * @irq: Irq number in case of interrupt keys 20 * @irq: Irq number in case of interrupt keys
21 * @gpiod: GPIO descriptor
20 */ 22 */
21struct gpio_keys_button { 23struct gpio_keys_button {
22 unsigned int code; 24 unsigned int code;
@@ -29,6 +31,7 @@ struct gpio_keys_button {
29 bool can_disable; 31 bool can_disable;
30 int value; 32 int value;
31 unsigned int irq; 33 unsigned int irq;
34 struct gpio_desc *gpiod;
32}; 35};
33 36
34/** 37/**
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c399392..1afde47e1528 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <asm/types.h> 17#include <asm/types.h>
18#include <asm/hash.h>
19#include <linux/compiler.h> 18#include <linux/compiler.h>
20 19
21/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ 20/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr)
84 return (u32)val; 83 return (u32)val;
85} 84}
86 85
87struct fast_hash_ops {
88 u32 (*hash)(const void *data, u32 len, u32 seed);
89 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
90};
91
92/**
93 * arch_fast_hash - Caclulates a hash over a given buffer that can have
94 * arbitrary size. This function will eventually use an
95 * architecture-optimized hashing implementation if
96 * available, and trades off distribution for speed.
97 *
98 * @data: buffer to hash
99 * @len: length of buffer in bytes
100 * @seed: start seed
101 *
102 * Returns 32bit hash.
103 */
104extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
105
106/**
 107 * arch_fast_hash2 - Calculates a hash over a given buffer that has a
108 * size that is of a multiple of 32bit words. This
109 * function will eventually use an architecture-
110 * optimized hashing implementation if available,
111 * and trades off distribution for speed.
112 *
113 * @data: buffer to hash (must be 32bit padded)
114 * @len: number of 32bit words
115 * @seed: start seed
116 *
117 * Returns 32bit hash.
118 */
119extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
120
121#endif /* _LINUX_HASH_H */ 86#endif /* _LINUX_HASH_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 78ea9bf941cd..06c4607744f6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -234,6 +234,33 @@ struct hid_item {
234#define HID_DG_BARRELSWITCH 0x000d0044 234#define HID_DG_BARRELSWITCH 0x000d0044
235#define HID_DG_ERASER 0x000d0045 235#define HID_DG_ERASER 0x000d0045
236#define HID_DG_TABLETPICK 0x000d0046 236#define HID_DG_TABLETPICK 0x000d0046
237
238#define HID_CP_CONSUMERCONTROL 0x000c0001
239#define HID_CP_NUMERICKEYPAD 0x000c0002
240#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003
241#define HID_CP_MICROPHONE 0x000c0004
242#define HID_CP_HEADPHONE 0x000c0005
243#define HID_CP_GRAPHICEQUALIZER 0x000c0006
244#define HID_CP_FUNCTIONBUTTONS 0x000c0036
245#define HID_CP_SELECTION 0x000c0080
246#define HID_CP_MEDIASELECTION 0x000c0087
247#define HID_CP_SELECTDISC 0x000c00ba
248#define HID_CP_PLAYBACKSPEED 0x000c00f1
249#define HID_CP_PROXIMITY 0x000c0109
250#define HID_CP_SPEAKERSYSTEM 0x000c0160
251#define HID_CP_CHANNELLEFT 0x000c0161
252#define HID_CP_CHANNELRIGHT 0x000c0162
253#define HID_CP_CHANNELCENTER 0x000c0163
254#define HID_CP_CHANNELFRONT 0x000c0164
255#define HID_CP_CHANNELCENTERFRONT 0x000c0165
256#define HID_CP_CHANNELSIDE 0x000c0166
257#define HID_CP_CHANNELSURROUND 0x000c0167
258#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168
259#define HID_CP_CHANNELTOP 0x000c0169
260#define HID_CP_CHANNELUNKNOWN 0x000c016a
261#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
262#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
263
237#define HID_DG_CONFIDENCE 0x000d0047 264#define HID_DG_CONFIDENCE 0x000d0047
238#define HID_DG_WIDTH 0x000d0048 265#define HID_DG_WIDTH 0x000d0048
239#define HID_DG_HEIGHT 0x000d0049 266#define HID_DG_HEIGHT 0x000d0049
@@ -312,11 +339,8 @@ struct hid_item {
312 * Vendor specific HID device groups 339 * Vendor specific HID device groups
313 */ 340 */
314#define HID_GROUP_RMI 0x0100 341#define HID_GROUP_RMI 0x0100
315
316/*
317 * Vendor specific HID device groups
318 */
319#define HID_GROUP_WACOM 0x0101 342#define HID_GROUP_WACOM 0x0101
343#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
320 344
321/* 345/*
322 * This is the global environment of the parser. This information is 346 * This is the global environment of the parser. This information is
@@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev)
1063 hdev->ll_driver->wait(hdev); 1087 hdev->ll_driver->wait(hdev);
1064} 1088}
1065 1089
1090/**
1091 * hid_report_len - calculate the report length
1092 *
1093 * @report: the report we want to know the length
1094 */
1095static inline int hid_report_len(struct hid_report *report)
1096{
1097 /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
1098 return ((report->size - 1) >> 3) + 1 + (report->id > 0);
1099}
1100
1066int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, 1101int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1067 int interrupt); 1102 int interrupt);
1068 1103
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6e6d338641fe..431b7fc605c9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
175} 175}
176 176
177#endif /* !CONFIG_HUGETLB_PAGE */ 177#endif /* !CONFIG_HUGETLB_PAGE */
178/*
179 * hugepages at page global directory. If arch support
180 * hugepages at pgd level, they need to define this.
181 */
182#ifndef pgd_huge
183#define pgd_huge(x) 0
184#endif
185
186#ifndef pgd_write
187static inline int pgd_write(pgd_t pgd)
188{
189 BUG();
190 return 0;
191}
192#endif
193
194#ifndef pud_write
195static inline int pud_write(pud_t pud)
196{
197 BUG();
198 return 0;
199}
200#endif
201
202#ifndef is_hugepd
203/*
204 * Some architectures requires a hugepage directory format that is
205 * required to support multiple hugepage sizes. For example
206 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
207 * introduced the same on powerpc. This allows for a more flexible hugepage
208 * pagetable layout.
209 */
210typedef struct { unsigned long pd; } hugepd_t;
211#define is_hugepd(hugepd) (0)
212#define __hugepd(x) ((hugepd_t) { (x) })
213static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
214 unsigned pdshift, unsigned long end,
215 int write, struct page **pages, int *nr)
216{
217 return 0;
218}
219#else
220extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
221 unsigned pdshift, unsigned long end,
222 int write, struct page **pages, int *nr);
223#endif
178 224
179#define HUGETLB_ANON_FILE "anon_hugepage" 225#define HUGETLB_ANON_FILE "anon_hugepage"
180 226
@@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
311{ 357{
312 if (!page_size_log) 358 if (!page_size_log)
313 return &default_hstate; 359 return &default_hstate;
314 return size_to_hstate(1 << page_size_log); 360
361 return size_to_hstate(1UL << page_size_log);
315} 362}
316 363
317static inline struct hstate *hstate_vma(struct vm_area_struct *vma) 364static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 0129f89cf98d..bcc853eccc85 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -16,7 +16,6 @@
16#define _LINUX_HUGETLB_CGROUP_H 16#define _LINUX_HUGETLB_CGROUP_H
17 17
18#include <linux/mmdebug.h> 18#include <linux/mmdebug.h>
19#include <linux/res_counter.h>
20 19
21struct hugetlb_cgroup; 20struct hugetlb_cgroup;
22/* 21/*
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 08cfaff8a072..476c685ca6f9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -650,6 +650,8 @@ struct vmbus_channel {
650 u8 monitor_grp; 650 u8 monitor_grp;
651 u8 monitor_bit; 651 u8 monitor_bit;
652 652
653 bool rescind; /* got rescind msg */
654
653 u32 ringbuffer_gpadlhandle; 655 u32 ringbuffer_gpadlhandle;
654 656
655 /* Allocated memory for ring buffer */ 657 /* Allocated memory for ring buffer */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b556e0ab946f..70ee0d3a2be3 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -359,7 +359,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
359 * to name two of the most common. 359 * to name two of the most common.
360 * 360 *
361 * The return codes from the @master_xfer field should indicate the type of 361 * The return codes from the @master_xfer field should indicate the type of
362 * error code that occured during the transfer, as documented in the kernel 362 * error code that occurred during the transfer, as documented in the kernel
363 * Documentation file Documentation/i2c/fault-codes. 363 * Documentation file Documentation/i2c/fault-codes.
364 */ 364 */
365struct i2c_algorithm { 365struct i2c_algorithm {
diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h
index 69280db02c41..ee3c2aba2a8e 100644
--- a/include/linux/i2c/pmbus.h
+++ b/include/linux/i2c/pmbus.h
@@ -40,6 +40,10 @@
40 40
41struct pmbus_platform_data { 41struct pmbus_platform_data {
42 u32 flags; /* Device specific flags */ 42 u32 flags; /* Device specific flags */
43
44 /* regulator support */
45 int num_regulators;
46 struct regulator_init_data *reg_init_data;
43}; 47};
44 48
45#endif /* _PMBUS_H_ */ 49#endif /* _PMBUS_H_ */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b1be39c76931..4f4eea8a6288 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -19,6 +19,7 @@
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/if_ether.h> 20#include <linux/if_ether.h>
21#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22#include <asm/unaligned.h>
22 23
23/* 24/*
24 * DS bit usage 25 * DS bit usage
@@ -1066,6 +1067,12 @@ struct ieee80211_pspoll {
1066 1067
1067/* TDLS */ 1068/* TDLS */
1068 1069
1070/* Channel switch timing */
1071struct ieee80211_ch_switch_timing {
1072 __le16 switch_time;
1073 __le16 switch_timeout;
1074} __packed;
1075
1069/* Link-id information element */ 1076/* Link-id information element */
1070struct ieee80211_tdls_lnkie { 1077struct ieee80211_tdls_lnkie {
1071 u8 ie_type; /* Link Identifier IE */ 1078 u8 ie_type; /* Link Identifier IE */
@@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data {
1107 u8 dialog_token; 1114 u8 dialog_token;
1108 u8 variable[0]; 1115 u8 variable[0];
1109 } __packed discover_req; 1116 } __packed discover_req;
1117 struct {
1118 u8 target_channel;
1119 u8 oper_class;
1120 u8 variable[0];
1121 } __packed chan_switch_req;
1122 struct {
1123 __le16 status_code;
1124 u8 variable[0];
1125 } __packed chan_switch_resp;
1110 } u; 1126 } u;
1111} __packed; 1127} __packed;
1112 1128
@@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap {
1274#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 1290#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
1275 1291
1276/* 1292/*
1277 * Maximum length of AMPDU that the STA can receive. 1293 * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
1278 * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) 1294 * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
1279 */ 1295 */
1280enum ieee80211_max_ampdu_length_exp { 1296enum ieee80211_max_ampdu_length_exp {
@@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp {
1284 IEEE80211_HT_MAX_AMPDU_64K = 3 1300 IEEE80211_HT_MAX_AMPDU_64K = 3
1285}; 1301};
1286 1302
1303/*
1304 * Maximum length of AMPDU that the STA can receive in VHT.
1305 * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
1306 */
1307enum ieee80211_vht_max_ampdu_length_exp {
1308 IEEE80211_VHT_MAX_AMPDU_8K = 0,
1309 IEEE80211_VHT_MAX_AMPDU_16K = 1,
1310 IEEE80211_VHT_MAX_AMPDU_32K = 2,
1311 IEEE80211_VHT_MAX_AMPDU_64K = 3,
1312 IEEE80211_VHT_MAX_AMPDU_128K = 4,
1313 IEEE80211_VHT_MAX_AMPDU_256K = 5,
1314 IEEE80211_VHT_MAX_AMPDU_512K = 6,
1315 IEEE80211_VHT_MAX_AMPDU_1024K = 7
1316};
1317
1287#define IEEE80211_HT_MAX_AMPDU_FACTOR 13 1318#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
1288 1319
1289/* Minimum MPDU start spacing */ 1320/* Minimum MPDU start spacing */
@@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode {
1998 WLAN_TDLS_DISCOVERY_REQUEST = 10, 2029 WLAN_TDLS_DISCOVERY_REQUEST = 10,
1999}; 2030};
2000 2031
2032/* Extended Channel Switching capability to be set in the 1st byte of
2033 * the @WLAN_EID_EXT_CAPABILITY information element
2034 */
2035#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
2036
 2037/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
2038#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
2039#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
2040#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
2041
2001/* Interworking capabilities are set in 7th bit of 4th byte of the 2042/* Interworking capabilities are set in 7th bit of 4th byte of the
2002 * @WLAN_EID_EXT_CAPABILITY information element 2043 * @WLAN_EID_EXT_CAPABILITY information element
2003 */ 2044 */
@@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode {
2009 */ 2050 */
2010#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) 2051#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
2011#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) 2052#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
2053#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
2012 2054
2013#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) 2055#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
2014#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) 2056#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
@@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode {
2016/* TDLS specific payload type in the LLC/SNAP header */ 2058/* TDLS specific payload type in the LLC/SNAP header */
2017#define WLAN_TDLS_SNAP_RFTYPE 0x2 2059#define WLAN_TDLS_SNAP_RFTYPE 0x2
2018 2060
2061/* BSS Coex IE information field bits */
2062#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
2063
2019/** 2064/**
2020 * enum - mesh synchronization method identifier 2065 * enum - mesh synchronization method identifier
2021 * 2066 *
@@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
2398 return !!(tim->virtual_map[index] & mask); 2443 return !!(tim->virtual_map[index] & mask);
2399} 2444}
2400 2445
2446/**
2447 * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
2448 * @skb: the skb containing the frame, length will not be checked
2449 * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
2450 *
2451 * This function assumes the frame is a data frame, and that the network header
2452 * is in the correct place.
2453 */
2454static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
2455{
2456 if (!skb_is_nonlinear(skb) &&
2457 skb->len > (skb_network_offset(skb) + 2)) {
2458 /* Point to where the indication of TDLS should start */
2459 const u8 *tdls_data = skb_network_header(skb) - 2;
2460
2461 if (get_unaligned_be16(tdls_data) == ETH_P_TDLS &&
2462 tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE &&
2463 tdls_data[3] == WLAN_CATEGORY_TDLS)
2464 return tdls_data[4];
2465 }
2466
2467 return -1;
2468}
2469
2401/* convert time units */ 2470/* convert time units */
2402#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) 2471#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
2403#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) 2472#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
new file mode 100644
index 000000000000..6e82d888287c
--- /dev/null
+++ b/include/linux/ieee802154.h
@@ -0,0 +1,242 @@
1/*
2 * IEEE802.15.4-2003 specification
3 *
4 * Copyright (C) 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * Written by:
16 * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
17 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
18 * Maxim Osipov <maxim.osipov@siemens.com>
19 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
20 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
21 */
22
23#ifndef LINUX_IEEE802154_H
24#define LINUX_IEEE802154_H
25
26#include <linux/types.h>
27#include <linux/random.h>
28#include <asm/byteorder.h>
29
30#define IEEE802154_MTU 127
31#define IEEE802154_MIN_PSDU_LEN 5
32
33#define IEEE802154_PAN_ID_BROADCAST 0xffff
34#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
35#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
36
37#define IEEE802154_EXTENDED_ADDR_LEN 8
38
39#define IEEE802154_LIFS_PERIOD 40
40#define IEEE802154_SIFS_PERIOD 12
41
42#define IEEE802154_MAX_CHANNEL 26
43#define IEEE802154_MAX_PAGE 31
44
45#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
46#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
47#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */
48#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */
49
50#define IEEE802154_FC_TYPE_SHIFT 0
51#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1)
52#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT)
53#define IEEE802154_FC_SET_TYPE(v, x) do { \
54 v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \
55 (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \
56 } while (0)
57
58#define IEEE802154_FC_SECEN_SHIFT 3
59#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT)
60#define IEEE802154_FC_FRPEND_SHIFT 4
61#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT)
62#define IEEE802154_FC_ACK_REQ_SHIFT 5
63#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT)
64#define IEEE802154_FC_INTRA_PAN_SHIFT 6
65#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT)
66
67#define IEEE802154_FC_SAMODE_SHIFT 14
68#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT)
69#define IEEE802154_FC_DAMODE_SHIFT 10
70#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT)
71
72#define IEEE802154_FC_VERSION_SHIFT 12
73#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT)
74#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT)
75
76#define IEEE802154_FC_SAMODE(x) \
77 (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT)
78
79#define IEEE802154_FC_DAMODE(x) \
80 (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
81
82#define IEEE802154_SCF_SECLEVEL_MASK 7
83#define IEEE802154_SCF_SECLEVEL_SHIFT 0
84#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK)
85#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3
86#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT)
87#define IEEE802154_SCF_KEY_ID_MODE(x) \
88 ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT)
89
90#define IEEE802154_SCF_KEY_IMPLICIT 0
91#define IEEE802154_SCF_KEY_INDEX 1
92#define IEEE802154_SCF_KEY_SHORT_INDEX 2
93#define IEEE802154_SCF_KEY_HW_INDEX 3
94
95#define IEEE802154_SCF_SECLEVEL_NONE 0
96#define IEEE802154_SCF_SECLEVEL_MIC32 1
97#define IEEE802154_SCF_SECLEVEL_MIC64 2
98#define IEEE802154_SCF_SECLEVEL_MIC128 3
99#define IEEE802154_SCF_SECLEVEL_ENC 4
100#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5
101#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6
102#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7
103
104/* MAC footer size */
105#define IEEE802154_MFR_SIZE 2 /* 2 octets */
106
107/* MAC's Command Frames Identifiers */
108#define IEEE802154_CMD_ASSOCIATION_REQ 0x01
109#define IEEE802154_CMD_ASSOCIATION_RESP 0x02
110#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03
111#define IEEE802154_CMD_DATA_REQ 0x04
112#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05
113#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06
114#define IEEE802154_CMD_BEACON_REQ 0x07
115#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08
116#define IEEE802154_CMD_GTS_REQ 0x09
117
118/*
119 * The return values of MAC operations
120 */
121enum {
122 /*
123 * The requested operation was completed successfully.
124 * For a transmission request, this value indicates
125 * a successful transmission.
126 */
127 IEEE802154_SUCCESS = 0x0,
128
129 /* The beacon was lost following a synchronization request. */
130 IEEE802154_BEACON_LOSS = 0xe0,
131 /*
132 * A transmission could not take place due to activity on the
133 * channel, i.e., the CSMA-CA mechanism has failed.
134 */
135 IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
136 /* The GTS request has been denied by the PAN coordinator. */
137 IEEE802154_DENINED = 0xe2,
138 /* The attempt to disable the transceiver has failed. */
139 IEEE802154_DISABLE_TRX_FAIL = 0xe3,
140 /*
141 * The received frame induces a failed security check according to
142 * the security suite.
143 */
144 IEEE802154_FAILED_SECURITY_CHECK = 0xe4,
145 /*
146 * The frame resulting from secure processing has a length that is
147 * greater than aMACMaxFrameSize.
148 */
149 IEEE802154_FRAME_TOO_LONG = 0xe5,
150 /*
151 * The requested GTS transmission failed because the specified GTS
152 * either did not have a transmit GTS direction or was not defined.
153 */
154 IEEE802154_INVALID_GTS = 0xe6,
155 /*
156 * A request to purge an MSDU from the transaction queue was made using
157 * an MSDU handle that was not found in the transaction table.
158 */
159 IEEE802154_INVALID_HANDLE = 0xe7,
160 /* A parameter in the primitive is out of the valid range.*/
161 IEEE802154_INVALID_PARAMETER = 0xe8,
162 /* No acknowledgment was received after aMaxFrameRetries. */
163 IEEE802154_NO_ACK = 0xe9,
164 /* A scan operation failed to find any network beacons.*/
165 IEEE802154_NO_BEACON = 0xea,
166 /* No response data were available following a request. */
167 IEEE802154_NO_DATA = 0xeb,
168 /* The operation failed because a short address was not allocated. */
169 IEEE802154_NO_SHORT_ADDRESS = 0xec,
170 /*
171 * A receiver enable request was unsuccessful because it could not be
172 * completed within the CAP.
173 */
174 IEEE802154_OUT_OF_CAP = 0xed,
175 /*
176 * A PAN identifier conflict has been detected and communicated to the
177 * PAN coordinator.
178 */
179 IEEE802154_PANID_CONFLICT = 0xee,
180 /* A coordinator realignment command has been received. */
181 IEEE802154_REALIGMENT = 0xef,
182 /* The transaction has expired and its information discarded. */
183 IEEE802154_TRANSACTION_EXPIRED = 0xf0,
184 /* There is no capacity to store the transaction. */
185 IEEE802154_TRANSACTION_OVERFLOW = 0xf1,
186 /*
187 * The transceiver was in the transmitter enabled state when the
188 * receiver was requested to be enabled.
189 */
190 IEEE802154_TX_ACTIVE = 0xf2,
191 /* The appropriate key is not available in the ACL. */
192 IEEE802154_UNAVAILABLE_KEY = 0xf3,
193 /*
194 * A SET/GET request was issued with the identifier of a PIB attribute
195 * that is not supported.
196 */
197 IEEE802154_UNSUPPORTED_ATTR = 0xf4,
198 /*
199 * A request to perform a scan operation failed because the MLME was
200 * in the process of performing a previously initiated scan operation.
201 */
202 IEEE802154_SCAN_IN_PROGRESS = 0xfc,
203};
204
205/**
206 * ieee802154_is_valid_psdu_len - check if psdu len is valid
207 * @len: psdu len with (MHR + payload + MFR)
208 */
209static inline bool ieee802154_is_valid_psdu_len(const u8 len)
210{
211 return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
212}
213
214/**
215 * ieee802154_is_valid_psdu_len - check if extended addr is valid
216 * @addr: extended addr to check
217 */
218static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
219{
220 /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
221 * is used internally as extended to short address broadcast mapping.
222 * This is currently a workaround because neighbor discovery can't
223 * deal with short addresses types right now.
224 */
225 return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
226 (addr != cpu_to_le64(0xffffffffffffffffULL)));
227}
228
229/**
230 * ieee802154_random_extended_addr - generates a random extended address
231 * @addr: extended addr pointer to place the random address
232 */
233static inline void ieee802154_random_extended_addr(__le64 *addr)
234{
235 get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
236
237 /* toggle some bit if we hit an invalid extended addr */
238 if (!ieee802154_is_valid_extended_addr(*addr))
239 ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
240}
241
242#endif /* LINUX_IEEE802154_H */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 808dcb8cc04f..0a8ce762a47f 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <uapi/linux/if_bridge.h> 17#include <uapi/linux/if_bridge.h>
18#include <linux/bitops.h>
18 19
19struct br_ip { 20struct br_ip {
20 union { 21 union {
@@ -32,11 +33,41 @@ struct br_ip_list {
32 struct br_ip addr; 33 struct br_ip addr;
33}; 34};
34 35
36#define BR_HAIRPIN_MODE BIT(0)
37#define BR_BPDU_GUARD BIT(1)
38#define BR_ROOT_BLOCK BIT(2)
39#define BR_MULTICAST_FAST_LEAVE BIT(3)
40#define BR_ADMIN_COST BIT(4)
41#define BR_LEARNING BIT(5)
42#define BR_FLOOD BIT(6)
43#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
44#define BR_PROMISC BIT(7)
45#define BR_PROXYARP BIT(8)
46#define BR_LEARNING_SYNC BIT(9)
47
35extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 48extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
36 49
37typedef int br_should_route_hook_t(struct sk_buff *skb); 50typedef int br_should_route_hook_t(struct sk_buff *skb);
38extern br_should_route_hook_t __rcu *br_should_route_hook; 51extern br_should_route_hook_t __rcu *br_should_route_hook;
39 52
53#if IS_ENABLED(CONFIG_BRIDGE)
54int br_fdb_external_learn_add(struct net_device *dev,
55 const unsigned char *addr, u16 vid);
56int br_fdb_external_learn_del(struct net_device *dev,
57 const unsigned char *addr, u16 vid);
58#else
59static inline int br_fdb_external_learn_add(struct net_device *dev,
60 const unsigned char *addr, u16 vid)
61{
62 return 0;
63}
64static inline int br_fdb_external_learn_del(struct net_device *dev,
65 const unsigned char *addr, u16 vid)
66{
67 return 0;
68}
69#endif
70
40#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) 71#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
41int br_multicast_list_adjacent(struct net_device *dev, 72int br_multicast_list_adjacent(struct net_device *dev,
42 struct list_head *br_ip_list); 73 struct list_head *br_ip_list);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d69f0577a319..515a35e2a48a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
282} 282}
283 283
284/** 284/**
285 * vlan_insert_tag - regular VLAN tag inserting 285 * __vlan_insert_tag - regular VLAN tag inserting
286 * @skb: skbuff to tag 286 * @skb: skbuff to tag
287 * @vlan_proto: VLAN encapsulation protocol 287 * @vlan_proto: VLAN encapsulation protocol
288 * @vlan_tci: VLAN TCI to insert 288 * @vlan_tci: VLAN TCI to insert
289 * 289 *
290 * Inserts the VLAN tag into @skb as part of the payload 290 * Inserts the VLAN tag into @skb as part of the payload
291 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. 291 * Returns error if skb_cow_head failes.
292 *
293 * Following the skb_unshare() example, in case of error, the calling function
294 * doesn't have to worry about freeing the original skb.
295 * 292 *
296 * Does not change skb->protocol so this function can be used during receive. 293 * Does not change skb->protocol so this function can be used during receive.
297 */ 294 */
298static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, 295static inline int __vlan_insert_tag(struct sk_buff *skb,
299 __be16 vlan_proto, u16 vlan_tci) 296 __be16 vlan_proto, u16 vlan_tci)
300{ 297{
301 struct vlan_ethhdr *veth; 298 struct vlan_ethhdr *veth;
302 299
303 if (skb_cow_head(skb, VLAN_HLEN) < 0) { 300 if (skb_cow_head(skb, VLAN_HLEN) < 0)
304 dev_kfree_skb_any(skb); 301 return -ENOMEM;
305 return NULL; 302
306 }
307 veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); 303 veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
308 304
309 /* Move the mac addresses to the beginning of the new header. */ 305 /* Move the mac addresses to the beginning of the new header. */
@@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
316 /* now, the TCI */ 312 /* now, the TCI */
317 veth->h_vlan_TCI = htons(vlan_tci); 313 veth->h_vlan_TCI = htons(vlan_tci);
318 314
315 return 0;
316}
317
318/**
319 * vlan_insert_tag - regular VLAN tag inserting
320 * @skb: skbuff to tag
321 * @vlan_proto: VLAN encapsulation protocol
322 * @vlan_tci: VLAN TCI to insert
323 *
324 * Inserts the VLAN tag into @skb as part of the payload
325 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
326 *
327 * Following the skb_unshare() example, in case of error, the calling function
328 * doesn't have to worry about freeing the original skb.
329 *
330 * Does not change skb->protocol so this function can be used during receive.
331 */
332static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
333 __be16 vlan_proto, u16 vlan_tci)
334{
335 int err;
336
337 err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
338 if (err) {
339 dev_kfree_skb_any(skb);
340 return NULL;
341 }
319 return skb; 342 return skb;
320} 343}
321 344
322/** 345/**
323 * __vlan_put_tag - regular VLAN tag inserting 346 * vlan_insert_tag_set_proto - regular VLAN tag inserting
324 * @skb: skbuff to tag 347 * @skb: skbuff to tag
348 * @vlan_proto: VLAN encapsulation protocol
325 * @vlan_tci: VLAN TCI to insert 349 * @vlan_tci: VLAN TCI to insert
326 * 350 *
327 * Inserts the VLAN tag into @skb as part of the payload 351 * Inserts the VLAN tag into @skb as part of the payload
@@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
330 * Following the skb_unshare() example, in case of error, the calling function 354 * Following the skb_unshare() example, in case of error, the calling function
331 * doesn't have to worry about freeing the original skb. 355 * doesn't have to worry about freeing the original skb.
332 */ 356 */
333static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, 357static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
334 __be16 vlan_proto, u16 vlan_tci) 358 __be16 vlan_proto,
359 u16 vlan_tci)
335{ 360{
336 skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); 361 skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
337 if (skb) 362 if (skb)
@@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
339 return skb; 364 return skb;
340} 365}
341 366
342/** 367/*
343 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting 368 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
344 * @skb: skbuff to tag 369 * @skb: skbuff to tag
345 * @vlan_proto: VLAN encapsulation protocol
346 * @vlan_tci: VLAN TCI to insert
347 * 370 *
348 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest 371 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
372 *
373 * Following the skb_unshare() example, in case of error, the calling function
374 * doesn't have to worry about freeing the original skb.
349 */ 375 */
350static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, 376static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
351 __be16 vlan_proto,
352 u16 vlan_tci)
353{ 377{
354 skb->vlan_proto = vlan_proto; 378 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
355 skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; 379 vlan_tx_tag_get(skb));
380 if (likely(skb))
381 skb->vlan_tci = 0;
382 return skb;
383}
384/*
385 * vlan_hwaccel_push_inside - pushes vlan tag to the payload
386 * @skb: skbuff to tag
387 *
388 * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the
389 * VLAN tag from @skb->vlan_tci inside to the payload.
390 *
391 * Following the skb_unshare() example, in case of error, the calling function
392 * doesn't have to worry about freeing the original skb.
393 */
394static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
395{
396 if (vlan_tx_tag_present(skb))
397 skb = __vlan_hwaccel_push_inside(skb);
356 return skb; 398 return skb;
357} 399}
358 400
359/** 401/**
360 * vlan_put_tag - inserts VLAN tag according to device features 402 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
361 * @skb: skbuff to tag 403 * @skb: skbuff to tag
404 * @vlan_proto: VLAN encapsulation protocol
362 * @vlan_tci: VLAN TCI to insert 405 * @vlan_tci: VLAN TCI to insert
363 * 406 *
364 * Assumes skb->dev is the target that will xmit this frame. 407 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
365 * Returns a VLAN tagged skb.
366 */ 408 */
367static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, 409static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
368 __be16 vlan_proto, u16 vlan_tci) 410 __be16 vlan_proto, u16 vlan_tci)
369{ 411{
370 if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { 412 skb->vlan_proto = vlan_proto;
371 return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 413 skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
372 } else {
373 return __vlan_put_tag(skb, vlan_proto, vlan_tci);
374 }
375} 414}
376 415
377/** 416/**
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 8bbd7bc1043d..03fa332ad2a8 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -72,7 +72,7 @@ struct iio_event_data {
72 72
73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) 73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
74 74
75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) 75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
76 76
77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) 77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
78 78
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0068708161ff..0a21fbefdfbe 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
242static __inline__ __be32 inet_make_mask(int logmask) 242static __inline__ __be32 inet_make_mask(int logmask)
243{ 243{
244 if (logmask) 244 if (logmask)
245 return htonl(~((1<<(32-logmask))-1)); 245 return htonl(~((1U<<(32-logmask))-1));
246 return 0; 246 return 0;
247} 247}
248 248
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 77fc43f8fb72..3037fc085e8e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -102,7 +102,7 @@ extern struct group_info init_groups;
102#define INIT_IDS 102#define INIT_IDS
103#endif 103#endif
104 104
105#ifdef CONFIG_TREE_PREEMPT_RCU 105#ifdef CONFIG_PREEMPT_RCU
106#define INIT_TASK_RCU_TREE_PREEMPT() \ 106#define INIT_TASK_RCU_TREE_PREEMPT() \
107 .rcu_blocked_node = NULL, 107 .rcu_blocked_node = NULL,
108#else 108#else
@@ -166,6 +166,15 @@ extern struct task_group root_task_group;
166# define INIT_RT_MUTEXES(tsk) 166# define INIT_RT_MUTEXES(tsk)
167#endif 167#endif
168 168
169#ifdef CONFIG_NUMA_BALANCING
170# define INIT_NUMA_BALANCING(tsk) \
171 .numa_preferred_nid = -1, \
172 .numa_group = NULL, \
173 .numa_faults = NULL,
174#else
175# define INIT_NUMA_BALANCING(tsk)
176#endif
177
169/* 178/*
170 * INIT_TASK is used to set up the first task table, touch at 179 * INIT_TASK is used to set up the first task table, touch at
171 * your own risk!. Base=0, limit=0x1fffff (=2MB) 180 * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -237,6 +246,7 @@ extern struct task_group root_task_group;
237 INIT_CPUSET_SEQ(tsk) \ 246 INIT_CPUSET_SEQ(tsk) \
238 INIT_RT_MUTEXES(tsk) \ 247 INIT_RT_MUTEXES(tsk) \
239 INIT_VTIME(tsk) \ 248 INIT_VTIME(tsk) \
249 INIT_NUMA_BALANCING(tsk) \
240} 250}
241 251
242 252
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69517a24bc50..d9b05b5bf8c7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -556,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t)
556 atomic_dec(&t->count); 556 atomic_dec(&t->count);
557} 557}
558 558
559static inline void tasklet_hi_enable(struct tasklet_struct *t)
560{
561 smp_mb__before_atomic();
562 atomic_dec(&t->count);
563}
564
565extern void tasklet_kill(struct tasklet_struct *t); 559extern void tasklet_kill(struct tasklet_struct *t);
566extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); 560extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
567extern void tasklet_init(struct tasklet_struct *t, 561extern void tasklet_init(struct tasklet_struct *t,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e6a7c9ff72f2..7a7bd15e54f1 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -22,12 +22,13 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/scatterlist.h>
25#include <trace/events/iommu.h> 26#include <trace/events/iommu.h>
26 27
27#define IOMMU_READ (1 << 0) 28#define IOMMU_READ (1 << 0)
28#define IOMMU_WRITE (1 << 1) 29#define IOMMU_WRITE (1 << 1)
29#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ 30#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
30#define IOMMU_EXEC (1 << 3) 31#define IOMMU_NOEXEC (1 << 3)
31 32
32struct iommu_ops; 33struct iommu_ops;
33struct iommu_group; 34struct iommu_group;
@@ -61,6 +62,7 @@ enum iommu_cap {
61 IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA 62 IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
62 transactions */ 63 transactions */
63 IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ 64 IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
65 IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
64}; 66};
65 67
66/* 68/*
@@ -97,6 +99,8 @@ enum iommu_attr {
97 * @detach_dev: detach device from an iommu domain 99 * @detach_dev: detach device from an iommu domain
98 * @map: map a physically contiguous memory region to an iommu domain 100 * @map: map a physically contiguous memory region to an iommu domain
99 * @unmap: unmap a physically contiguous memory region from an iommu domain 101 * @unmap: unmap a physically contiguous memory region from an iommu domain
102 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
103 * to an iommu domain
100 * @iova_to_phys: translate iova to physical address 104 * @iova_to_phys: translate iova to physical address
101 * @add_device: add device to iommu grouping 105 * @add_device: add device to iommu grouping
102 * @remove_device: remove device from iommu grouping 106 * @remove_device: remove device from iommu grouping
@@ -114,6 +118,8 @@ struct iommu_ops {
114 phys_addr_t paddr, size_t size, int prot); 118 phys_addr_t paddr, size_t size, int prot);
115 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, 119 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
116 size_t size); 120 size_t size);
121 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
122 struct scatterlist *sg, unsigned int nents, int prot);
117 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); 123 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
118 int (*add_device)(struct device *dev); 124 int (*add_device)(struct device *dev);
119 void (*remove_device)(struct device *dev); 125 void (*remove_device)(struct device *dev);
@@ -156,6 +162,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
156 phys_addr_t paddr, size_t size, int prot); 162 phys_addr_t paddr, size_t size, int prot);
157extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, 163extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
158 size_t size); 164 size_t size);
165extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
166 struct scatterlist *sg,unsigned int nents,
167 int prot);
159extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); 168extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
160extern void iommu_set_fault_handler(struct iommu_domain *domain, 169extern void iommu_set_fault_handler(struct iommu_domain *domain,
161 iommu_fault_handler_t handler, void *token); 170 iommu_fault_handler_t handler, void *token);
@@ -241,6 +250,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
241 return ret; 250 return ret;
242} 251}
243 252
253static inline size_t iommu_map_sg(struct iommu_domain *domain,
254 unsigned long iova, struct scatterlist *sg,
255 unsigned int nents, int prot)
256{
257 return domain->ops->map_sg(domain, iova, sg, nents, prot);
258}
259
244#else /* CONFIG_IOMMU_API */ 260#else /* CONFIG_IOMMU_API */
245 261
246struct iommu_ops {}; 262struct iommu_ops {};
@@ -293,6 +309,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
293 return -ENODEV; 309 return -ENODEV;
294} 310}
295 311
312static inline size_t iommu_map_sg(struct iommu_domain *domain,
313 unsigned long iova, struct scatterlist *sg,
314 unsigned int nents, int prot)
315{
316 return -ENODEV;
317}
318
296static inline int iommu_domain_window_enable(struct iommu_domain *domain, 319static inline int iommu_domain_window_enable(struct iommu_domain *domain,
297 u32 wnd_nr, phys_addr_t paddr, 320 u32 wnd_nr, phys_addr_t paddr,
298 u64 size, int prot) 321 u64 size, int prot)
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 35e7eca4e33b..e365d5ec69cb 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -7,15 +7,6 @@
7#include <linux/notifier.h> 7#include <linux/notifier.h>
8#include <linux/nsproxy.h> 8#include <linux/nsproxy.h>
9 9
10/*
11 * ipc namespace events
12 */
13#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */
14#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */
15#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */
16
17#define IPCNS_CALLBACK_PRI 0
18
19struct user_namespace; 10struct user_namespace;
20 11
21struct ipc_ids { 12struct ipc_ids {
@@ -38,7 +29,6 @@ struct ipc_namespace {
38 unsigned int msg_ctlmni; 29 unsigned int msg_ctlmni;
39 atomic_t msg_bytes; 30 atomic_t msg_bytes;
40 atomic_t msg_hdrs; 31 atomic_t msg_hdrs;
41 int auto_msgmni;
42 32
43 size_t shm_ctlmax; 33 size_t shm_ctlmax;
44 size_t shm_ctlall; 34 size_t shm_ctlall;
@@ -77,18 +67,8 @@ extern atomic_t nr_ipc_ns;
77extern spinlock_t mq_lock; 67extern spinlock_t mq_lock;
78 68
79#ifdef CONFIG_SYSVIPC 69#ifdef CONFIG_SYSVIPC
80extern int register_ipcns_notifier(struct ipc_namespace *);
81extern int cond_register_ipcns_notifier(struct ipc_namespace *);
82extern void unregister_ipcns_notifier(struct ipc_namespace *);
83extern int ipcns_notify(unsigned long);
84extern void shm_destroy_orphaned(struct ipc_namespace *ns); 70extern void shm_destroy_orphaned(struct ipc_namespace *ns);
85#else /* CONFIG_SYSVIPC */ 71#else /* CONFIG_SYSVIPC */
86static inline int register_ipcns_notifier(struct ipc_namespace *ns)
87{ return 0; }
88static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns)
89{ return 0; }
90static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { }
91static inline int ipcns_notify(unsigned long l) { return 0; }
92static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} 72static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
93#endif /* CONFIG_SYSVIPC */ 73#endif /* CONFIG_SYSVIPC */
94 74
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 76d2acbfa7c6..838dbfa3c331 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -37,6 +37,7 @@
37 37
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
40#include <linux/acpi.h> /* For acpi_handle */
40 41
41struct module; 42struct module;
42struct device; 43struct device;
@@ -278,15 +279,18 @@ enum ipmi_addr_src {
278 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, 279 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
279 SI_PCI, SI_DEVICETREE, SI_DEFAULT 280 SI_PCI, SI_DEVICETREE, SI_DEFAULT
280}; 281};
282const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
281 283
282union ipmi_smi_info_union { 284union ipmi_smi_info_union {
285#ifdef CONFIG_ACPI
283 /* 286 /*
284 * the acpi_info element is defined for the SI_ACPI 287 * the acpi_info element is defined for the SI_ACPI
285 * address type 288 * address type
286 */ 289 */
287 struct { 290 struct {
288 void *acpi_handle; 291 acpi_handle acpi_handle;
289 } acpi_info; 292 } acpi_info;
293#endif
290}; 294};
291 295
292struct ipmi_smi_info { 296struct ipmi_smi_info {
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index bd349240d50e..0b1e569f5ff5 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -98,12 +98,11 @@ struct ipmi_smi_handlers {
98 operation is not allowed to fail. If an error occurs, it 98 operation is not allowed to fail. If an error occurs, it
99 should report back the error in a received message. It may 99 should report back the error in a received message. It may
100 do this in the current call context, since no write locks 100 do this in the current call context, since no write locks
101 are held when this is run. If the priority is > 0, the 101 are held when this is run. Message are delivered one at
102 message will go into a high-priority queue and be sent 102 a time by the message handler, a new message will not be
103 first. Otherwise, it goes into a normal-priority queue. */ 103 delivered until the previous message is returned. */
104 void (*sender)(void *send_info, 104 void (*sender)(void *send_info,
105 struct ipmi_smi_msg *msg, 105 struct ipmi_smi_msg *msg);
106 int priority);
107 106
108 /* Called by the upper layer to request that we try to get 107 /* Called by the upper layer to request that we try to get
109 events from the BMC we are attached to. */ 108 events from the BMC we are attached to. */
@@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
212 void *send_info, 211 void *send_info,
213 struct ipmi_device_id *device_id, 212 struct ipmi_device_id *device_id,
214 struct device *dev, 213 struct device *dev,
215 const char *sysfs_name,
216 unsigned char slave_addr); 214 unsigned char slave_addr);
217 215
218/* 216/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ff560537dd61..c694e7baa621 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -42,6 +42,7 @@ struct ipv6_devconf {
42 __s32 accept_ra_from_local; 42 __s32 accept_ra_from_local;
43#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 43#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
44 __s32 optimistic_dad; 44 __s32 optimistic_dad;
45 __s32 use_optimistic;
45#endif 46#endif
46#ifdef CONFIG_IPV6_MROUTE 47#ifdef CONFIG_IPV6_MROUTE
47 __s32 mc_forwarding; 48 __s32 mc_forwarding;
@@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
316#define tcp_twsk_ipv6only(__sk) 0 317#define tcp_twsk_ipv6only(__sk) 0
317#define inet_v6_ipv6only(__sk) 0 318#define inet_v6_ipv6only(__sk) 0
318#endif /* IS_ENABLED(CONFIG_IPV6) */ 319#endif /* IS_ENABLED(CONFIG_IPV6) */
319
320#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
321 (((__sk)->sk_portpair == (__ports)) && \
322 ((__sk)->sk_family == AF_INET6) && \
323 ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
324 ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
325 (!(__sk)->sk_bound_dev_if || \
326 ((__sk)->sk_bound_dev_if == (__dif))) && \
327 net_eq(sock_net(__sk), (__net)))
328
329#endif /* _IPV6_H */ 320#endif /* _IPV6_H */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 03f48d936f66..d09ec7a1243e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -15,11 +15,13 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/cpumask.h> 16#include <linux/cpumask.h>
17#include <linux/gfp.h> 17#include <linux/gfp.h>
18#include <linux/irqhandler.h>
18#include <linux/irqreturn.h> 19#include <linux/irqreturn.h>
19#include <linux/irqnr.h> 20#include <linux/irqnr.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/topology.h> 22#include <linux/topology.h>
22#include <linux/wait.h> 23#include <linux/wait.h>
24#include <linux/io.h>
23 25
24#include <asm/irq.h> 26#include <asm/irq.h>
25#include <asm/ptrace.h> 27#include <asm/ptrace.h>
@@ -27,11 +29,7 @@
27 29
28struct seq_file; 30struct seq_file;
29struct module; 31struct module;
30struct irq_desc; 32struct msi_msg;
31struct irq_data;
32typedef void (*irq_flow_handler_t)(unsigned int irq,
33 struct irq_desc *desc);
34typedef void (*irq_preflow_handler_t)(struct irq_data *data);
35 33
36/* 34/*
37 * IRQ line status. 35 * IRQ line status.
@@ -113,10 +111,14 @@ enum {
113 * 111 *
114 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity 112 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
115 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity 113 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
114 * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
115 * support stacked irqchips, which indicates skipping
116 * all descendent irqchips.
116 */ 117 */
117enum { 118enum {
118 IRQ_SET_MASK_OK = 0, 119 IRQ_SET_MASK_OK = 0,
119 IRQ_SET_MASK_OK_NOCOPY, 120 IRQ_SET_MASK_OK_NOCOPY,
121 IRQ_SET_MASK_OK_DONE,
120}; 122};
121 123
122struct msi_desc; 124struct msi_desc;
@@ -133,6 +135,8 @@ struct irq_domain;
133 * @chip: low level interrupt hardware access 135 * @chip: low level interrupt hardware access
134 * @domain: Interrupt translation domain; responsible for mapping 136 * @domain: Interrupt translation domain; responsible for mapping
135 * between hwirq number and linux irq number. 137 * between hwirq number and linux irq number.
138 * @parent_data: pointer to parent struct irq_data to support hierarchy
139 * irq_domain
136 * @handler_data: per-IRQ data for the irq_chip methods 140 * @handler_data: per-IRQ data for the irq_chip methods
137 * @chip_data: platform-specific per-chip private data for the chip 141 * @chip_data: platform-specific per-chip private data for the chip
138 * methods, to allow shared chip implementations 142 * methods, to allow shared chip implementations
@@ -151,6 +155,9 @@ struct irq_data {
151 unsigned int state_use_accessors; 155 unsigned int state_use_accessors;
152 struct irq_chip *chip; 156 struct irq_chip *chip;
153 struct irq_domain *domain; 157 struct irq_domain *domain;
158#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
159 struct irq_data *parent_data;
160#endif
154 void *handler_data; 161 void *handler_data;
155 void *chip_data; 162 void *chip_data;
156 struct msi_desc *msi_desc; 163 struct msi_desc *msi_desc;
@@ -315,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
315 * any other callback related to this irq 322 * any other callback related to this irq
316 * @irq_release_resources: optional to release resources acquired with 323 * @irq_release_resources: optional to release resources acquired with
317 * irq_request_resources 324 * irq_request_resources
325 * @irq_compose_msi_msg: optional to compose message content for MSI
326 * @irq_write_msi_msg: optional to write message content for MSI
318 * @flags: chip specific flags 327 * @flags: chip specific flags
319 */ 328 */
320struct irq_chip { 329struct irq_chip {
@@ -351,6 +360,9 @@ struct irq_chip {
351 int (*irq_request_resources)(struct irq_data *data); 360 int (*irq_request_resources)(struct irq_data *data);
352 void (*irq_release_resources)(struct irq_data *data); 361 void (*irq_release_resources)(struct irq_data *data);
353 362
363 void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
364 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
365
354 unsigned long flags; 366 unsigned long flags;
355}; 367};
356 368
@@ -438,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
438extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 450extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
439extern void handle_nested_irq(unsigned int irq); 451extern void handle_nested_irq(unsigned int irq);
440 452
453extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
454#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
455extern void irq_chip_ack_parent(struct irq_data *data);
456extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
457extern void irq_chip_mask_parent(struct irq_data *data);
458extern void irq_chip_unmask_parent(struct irq_data *data);
459extern void irq_chip_eoi_parent(struct irq_data *data);
460extern int irq_chip_set_affinity_parent(struct irq_data *data,
461 const struct cpumask *dest,
462 bool force);
463#endif
464
441/* Handling of unhandled and spurious interrupts: */ 465/* Handling of unhandled and spurious interrupts: */
442extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 466extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
443 irqreturn_t action_ret); 467 irqreturn_t action_ret);
@@ -639,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq);
639void irq_init_desc(unsigned int irq); 663void irq_init_desc(unsigned int irq);
640#endif 664#endif
641 665
642#ifndef irq_reg_writel
643# define irq_reg_writel(val, addr) writel(val, addr)
644#endif
645#ifndef irq_reg_readl
646# define irq_reg_readl(addr) readl(addr)
647#endif
648
649/** 666/**
650 * struct irq_chip_regs - register offsets for struct irq_gci 667 * struct irq_chip_regs - register offsets for struct irq_gci
651 * @enable: Enable register offset to reg_base 668 * @enable: Enable register offset to reg_base
@@ -692,6 +709,8 @@ struct irq_chip_type {
692 * struct irq_chip_generic - Generic irq chip data structure 709 * struct irq_chip_generic - Generic irq chip data structure
693 * @lock: Lock to protect register and cache data access 710 * @lock: Lock to protect register and cache data access
694 * @reg_base: Register base address (virtual) 711 * @reg_base: Register base address (virtual)
712 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
713 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
695 * @irq_base: Interrupt base nr for this chip 714 * @irq_base: Interrupt base nr for this chip
696 * @irq_cnt: Number of interrupts handled by this chip 715 * @irq_cnt: Number of interrupts handled by this chip
697 * @mask_cache: Cached mask register shared between all chip types 716 * @mask_cache: Cached mask register shared between all chip types
@@ -716,6 +735,8 @@ struct irq_chip_type {
716struct irq_chip_generic { 735struct irq_chip_generic {
717 raw_spinlock_t lock; 736 raw_spinlock_t lock;
718 void __iomem *reg_base; 737 void __iomem *reg_base;
738 u32 (*reg_readl)(void __iomem *addr);
739 void (*reg_writel)(u32 val, void __iomem *addr);
719 unsigned int irq_base; 740 unsigned int irq_base;
720 unsigned int irq_cnt; 741 unsigned int irq_cnt;
721 u32 mask_cache; 742 u32 mask_cache;
@@ -740,12 +761,14 @@ struct irq_chip_generic {
740 * the parent irq. Usually GPIO implementations 761 * the parent irq. Usually GPIO implementations
741 * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private 762 * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
742 * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask 763 * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask
764 * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE)
743 */ 765 */
744enum irq_gc_flags { 766enum irq_gc_flags {
745 IRQ_GC_INIT_MASK_CACHE = 1 << 0, 767 IRQ_GC_INIT_MASK_CACHE = 1 << 0,
746 IRQ_GC_INIT_NESTED_LOCK = 1 << 1, 768 IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
747 IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, 769 IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
748 IRQ_GC_NO_MASK = 1 << 3, 770 IRQ_GC_NO_MASK = 1 << 3,
771 IRQ_GC_BE_IO = 1 << 4,
749}; 772};
750 773
751/* 774/*
@@ -821,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
821static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } 844static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
822#endif 845#endif
823 846
847static inline void irq_reg_writel(struct irq_chip_generic *gc,
848 u32 val, int reg_offset)
849{
850 if (gc->reg_writel)
851 gc->reg_writel(val, gc->reg_base + reg_offset);
852 else
853 writel(val, gc->reg_base + reg_offset);
854}
855
856static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
857 int reg_offset)
858{
859 if (gc->reg_readl)
860 return gc->reg_readl(gc->reg_base + reg_offset);
861 else
862 return readl(gc->reg_base + reg_offset);
863}
864
824#endif /* _LINUX_IRQ_H */ 865#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
new file mode 100644
index 000000000000..420f77b34d02
--- /dev/null
+++ b/include/linux/irqchip/mips-gic.h
@@ -0,0 +1,249 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000, 07 MIPS Technologies, Inc.
7 */
8#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
9#define __LINUX_IRQCHIP_MIPS_GIC_H
10
11#include <linux/clocksource.h>
12
13#define GIC_MAX_INTRS 256
14
15/* Constants */
16#define GIC_POL_POS 1
17#define GIC_POL_NEG 0
18#define GIC_TRIG_EDGE 1
19#define GIC_TRIG_LEVEL 0
20#define GIC_TRIG_DUAL_ENABLE 1
21#define GIC_TRIG_DUAL_DISABLE 0
22
23#define MSK(n) ((1 << (n)) - 1)
24
25/* Accessors */
26#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
27
28/* GIC Address Space */
29#define SHARED_SECTION_OFS 0x0000
30#define SHARED_SECTION_SIZE 0x8000
31#define VPE_LOCAL_SECTION_OFS 0x8000
32#define VPE_LOCAL_SECTION_SIZE 0x4000
33#define VPE_OTHER_SECTION_OFS 0xc000
34#define VPE_OTHER_SECTION_SIZE 0x4000
35#define USM_VISIBLE_SECTION_OFS 0x10000
36#define USM_VISIBLE_SECTION_SIZE 0x10000
37
38/* Register Map for Shared Section */
39
40#define GIC_SH_CONFIG_OFS 0x0000
41
42/* Shared Global Counter */
43#define GIC_SH_COUNTER_31_00_OFS 0x0010
44#define GIC_SH_COUNTER_63_32_OFS 0x0014
45#define GIC_SH_REVISIONID_OFS 0x0020
46
47/* Convert an interrupt number to a byte offset/bit for multi-word registers */
48#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
49#define GIC_INTR_BIT(intr) ((intr) % 32)
50
51/* Polarity : Reset Value is always 0 */
52#define GIC_SH_SET_POLARITY_OFS 0x0100
53
54/* Triggering : Reset Value is always 0 */
55#define GIC_SH_SET_TRIGGER_OFS 0x0180
56
57/* Dual edge triggering : Reset Value is always 0 */
58#define GIC_SH_SET_DUAL_OFS 0x0200
59
60/* Set/Clear corresponding bit in Edge Detect Register */
61#define GIC_SH_WEDGE_OFS 0x0280
62
63/* Mask manipulation */
64#define GIC_SH_RMASK_OFS 0x0300
65#define GIC_SH_SMASK_OFS 0x0380
66
67/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
68#define GIC_SH_MASK_OFS 0x0400
69
70/* Pending Global Interrupts (RO) */
71#define GIC_SH_PEND_OFS 0x0480
72
73/* Maps Interrupt X to a Pin */
74#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
75#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
76
77/* Maps Interrupt X to a VPE */
78#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
79#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
80 ((32 * (intr)) + (((vpe) / 32) * 4))
81#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
82
83/* Register Map for Local Section */
84#define GIC_VPE_CTL_OFS 0x0000
85#define GIC_VPE_PEND_OFS 0x0004
86#define GIC_VPE_MASK_OFS 0x0008
87#define GIC_VPE_RMASK_OFS 0x000c
88#define GIC_VPE_SMASK_OFS 0x0010
89#define GIC_VPE_WD_MAP_OFS 0x0040
90#define GIC_VPE_COMPARE_MAP_OFS 0x0044
91#define GIC_VPE_TIMER_MAP_OFS 0x0048
92#define GIC_VPE_FDC_MAP_OFS 0x004c
93#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
94#define GIC_VPE_SWINT0_MAP_OFS 0x0054
95#define GIC_VPE_SWINT1_MAP_OFS 0x0058
96#define GIC_VPE_OTHER_ADDR_OFS 0x0080
97#define GIC_VPE_WD_CONFIG0_OFS 0x0090
98#define GIC_VPE_WD_COUNT0_OFS 0x0094
99#define GIC_VPE_WD_INITIAL0_OFS 0x0098
100#define GIC_VPE_COMPARE_LO_OFS 0x00a0
101#define GIC_VPE_COMPARE_HI_OFS 0x00a4
102
103#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
104#define GIC_VPE_EIC_SS(intr) (4 * (intr))
105
106#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
107#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
108
109#define GIC_VPE_TENABLE_NMI_OFS 0x1000
110#define GIC_VPE_TENABLE_YQ_OFS 0x1004
111#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
112#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
113
114/* User Mode Visible Section Register Map */
115#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
116#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
117
118/* Masks */
119#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
120#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
121
122#define GIC_SH_CONFIG_COUNTBITS_SHF 24
123#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
124
125#define GIC_SH_CONFIG_NUMINTRS_SHF 16
126#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
127
128#define GIC_SH_CONFIG_NUMVPES_SHF 0
129#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
130
131#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
132#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
133
134#define GIC_MAP_TO_PIN_SHF 31
135#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
136#define GIC_MAP_TO_NMI_SHF 30
137#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
138#define GIC_MAP_TO_YQ_SHF 29
139#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
140#define GIC_MAP_SHF 0
141#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
142
143/* GIC_VPE_CTL Masks */
144#define GIC_VPE_CTL_FDC_RTBL_SHF 4
145#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
146#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
147#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
148#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
149#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
150#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
151#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
152#define GIC_VPE_CTL_EIC_MODE_SHF 0
153#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
154
155/* GIC_VPE_PEND Masks */
156#define GIC_VPE_PEND_WD_SHF 0
157#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
158#define GIC_VPE_PEND_CMP_SHF 1
159#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
160#define GIC_VPE_PEND_TIMER_SHF 2
161#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
162#define GIC_VPE_PEND_PERFCOUNT_SHF 3
163#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
164#define GIC_VPE_PEND_SWINT0_SHF 4
165#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
166#define GIC_VPE_PEND_SWINT1_SHF 5
167#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
168
169/* GIC_VPE_RMASK Masks */
170#define GIC_VPE_RMASK_WD_SHF 0
171#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
172#define GIC_VPE_RMASK_CMP_SHF 1
173#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
174#define GIC_VPE_RMASK_TIMER_SHF 2
175#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
176#define GIC_VPE_RMASK_PERFCNT_SHF 3
177#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
178#define GIC_VPE_RMASK_SWINT0_SHF 4
179#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
180#define GIC_VPE_RMASK_SWINT1_SHF 5
181#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
182
183/* GIC_VPE_SMASK Masks */
184#define GIC_VPE_SMASK_WD_SHF 0
185#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
186#define GIC_VPE_SMASK_CMP_SHF 1
187#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
188#define GIC_VPE_SMASK_TIMER_SHF 2
189#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
190#define GIC_VPE_SMASK_PERFCNT_SHF 3
191#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
192#define GIC_VPE_SMASK_SWINT0_SHF 4
193#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
194#define GIC_VPE_SMASK_SWINT1_SHF 5
195#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
196
197/* GIC nomenclature for Core Interrupt Pins. */
198#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
199#define GIC_CPU_INT1 1 /* . */
200#define GIC_CPU_INT2 2 /* . */
201#define GIC_CPU_INT3 3 /* . */
202#define GIC_CPU_INT4 4 /* . */
203#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
204
205/* Add 2 to convert GIC CPU pin to core interrupt */
206#define GIC_CPU_PIN_OFFSET 2
207
208/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
209#define GIC_CPU_TO_VEC_OFFSET 2
210
211/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
212#define GIC_PIN_TO_VEC_OFFSET 1
213
214/* Local GIC interrupts. */
215#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
216#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
217#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
218#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
219#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
220#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
221#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
222#define GIC_NUM_LOCAL_INTRS 7
223
224/* Convert between local/shared IRQ number and GIC HW IRQ number. */
225#define GIC_LOCAL_HWIRQ_BASE 0
226#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
227#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
228#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
229#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
230#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
231
232extern unsigned int gic_present;
233
234extern void gic_init(unsigned long gic_base_addr,
235 unsigned long gic_addrspace_size, unsigned int cpu_vec,
236 unsigned int irqbase);
237extern void gic_clocksource_init(unsigned int);
238extern cycle_t gic_read_count(void);
239extern unsigned int gic_get_count_width(void);
240extern cycle_t gic_read_compare(void);
241extern void gic_write_compare(cycle_t cnt);
242extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
243extern void gic_send_ipi(unsigned int intr);
244extern unsigned int plat_ipi_call_int_xlate(unsigned int);
245extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
246extern unsigned int gic_get_timer_pending(void);
247extern int gic_get_c0_compare_int(void);
248extern int gic_get_c0_perfcount_int(void);
249#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b0f9d16e48f6..676d7306a360 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -33,11 +33,14 @@
33#define _LINUX_IRQDOMAIN_H 33#define _LINUX_IRQDOMAIN_H
34 34
35#include <linux/types.h> 35#include <linux/types.h>
36#include <linux/irqhandler.h>
36#include <linux/radix-tree.h> 37#include <linux/radix-tree.h>
37 38
38struct device_node; 39struct device_node;
39struct irq_domain; 40struct irq_domain;
40struct of_device_id; 41struct of_device_id;
42struct irq_chip;
43struct irq_data;
41 44
42/* Number of irqs reserved for a legacy isa controller */ 45/* Number of irqs reserved for a legacy isa controller */
43#define NUM_ISA_INTERRUPTS 16 46#define NUM_ISA_INTERRUPTS 16
@@ -64,6 +67,16 @@ struct irq_domain_ops {
64 int (*xlate)(struct irq_domain *d, struct device_node *node, 67 int (*xlate)(struct irq_domain *d, struct device_node *node,
65 const u32 *intspec, unsigned int intsize, 68 const u32 *intspec, unsigned int intsize,
66 unsigned long *out_hwirq, unsigned int *out_type); 69 unsigned long *out_hwirq, unsigned int *out_type);
70
71#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
72 /* extended V2 interfaces to support hierarchy irq_domains */
73 int (*alloc)(struct irq_domain *d, unsigned int virq,
74 unsigned int nr_irqs, void *arg);
75 void (*free)(struct irq_domain *d, unsigned int virq,
76 unsigned int nr_irqs);
77 void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
78 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
79#endif
67}; 80};
68 81
69extern struct irq_domain_ops irq_generic_chip_ops; 82extern struct irq_domain_ops irq_generic_chip_ops;
@@ -77,6 +90,7 @@ struct irq_domain_chip_generic;
77 * @ops: pointer to irq_domain methods 90 * @ops: pointer to irq_domain methods
78 * @host_data: private data pointer for use by owner. Not touched by irq_domain 91 * @host_data: private data pointer for use by owner. Not touched by irq_domain
79 * core code. 92 * core code.
93 * @flags: host per irq_domain flags
80 * 94 *
81 * Optional elements 95 * Optional elements
82 * @of_node: Pointer to device tree nodes associated with the irq_domain. Used 96 * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
@@ -84,6 +98,7 @@ struct irq_domain_chip_generic;
84 * @gc: Pointer to a list of generic chips. There is a helper function for 98 * @gc: Pointer to a list of generic chips. There is a helper function for
85 * setting up one or more generic chips for interrupt controllers 99 * setting up one or more generic chips for interrupt controllers
86 * drivers using the generic chip library which uses this pointer. 100 * drivers using the generic chip library which uses this pointer.
101 * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
87 * 102 *
88 * Revmap data, used internally by irq_domain 103 * Revmap data, used internally by irq_domain
89 * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that 104 * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -97,10 +112,14 @@ struct irq_domain {
97 const char *name; 112 const char *name;
98 const struct irq_domain_ops *ops; 113 const struct irq_domain_ops *ops;
99 void *host_data; 114 void *host_data;
115 unsigned int flags;
100 116
101 /* Optional data */ 117 /* Optional data */
102 struct device_node *of_node; 118 struct device_node *of_node;
103 struct irq_domain_chip_generic *gc; 119 struct irq_domain_chip_generic *gc;
120#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
121 struct irq_domain *parent;
122#endif
104 123
105 /* reverse map data. The linear map gets appended to the irq_domain */ 124 /* reverse map data. The linear map gets appended to the irq_domain */
106 irq_hw_number_t hwirq_max; 125 irq_hw_number_t hwirq_max;
@@ -110,6 +129,22 @@ struct irq_domain {
110 unsigned int linear_revmap[]; 129 unsigned int linear_revmap[];
111}; 130};
112 131
132/* Irq domain flags */
133enum {
134 /* Irq domain is hierarchical */
135 IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
136
137 /* Core calls alloc/free recursive through the domain hierarchy. */
138 IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
139
140 /*
141 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
142 * for implementation specific purposes and ignored by the
143 * core code.
144 */
145 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
146};
147
113#ifdef CONFIG_IRQ_DOMAIN 148#ifdef CONFIG_IRQ_DOMAIN
114struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 149struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
115 irq_hw_number_t hwirq_max, int direct_max, 150 irq_hw_number_t hwirq_max, int direct_max,
@@ -220,8 +255,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
220 const u32 *intspec, unsigned int intsize, 255 const u32 *intspec, unsigned int intsize,
221 irq_hw_number_t *out_hwirq, unsigned int *out_type); 256 irq_hw_number_t *out_hwirq, unsigned int *out_type);
222 257
258/* V2 interfaces to support hierarchy IRQ domains. */
259extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
260 unsigned int virq);
261#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
262extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
263 unsigned int flags, unsigned int size,
264 struct device_node *node,
265 const struct irq_domain_ops *ops, void *host_data);
266extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
267 unsigned int nr_irqs, int node, void *arg,
268 bool realloc);
269extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
270extern void irq_domain_activate_irq(struct irq_data *irq_data);
271extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
272
273static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
274 unsigned int nr_irqs, int node, void *arg)
275{
276 return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
277}
278
279extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
280 unsigned int virq,
281 irq_hw_number_t hwirq,
282 struct irq_chip *chip,
283 void *chip_data);
284extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
285 irq_hw_number_t hwirq, struct irq_chip *chip,
286 void *chip_data, irq_flow_handler_t handler,
287 void *handler_data, const char *handler_name);
288extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
289extern void irq_domain_free_irqs_common(struct irq_domain *domain,
290 unsigned int virq,
291 unsigned int nr_irqs);
292extern void irq_domain_free_irqs_top(struct irq_domain *domain,
293 unsigned int virq, unsigned int nr_irqs);
294
295extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
296 unsigned int irq_base,
297 unsigned int nr_irqs, void *arg);
298
299extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
300 unsigned int irq_base,
301 unsigned int nr_irqs);
302
303static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
304{
305 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
306}
307#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
308static inline void irq_domain_activate_irq(struct irq_data *data) { }
309static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
310static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
311 unsigned int nr_irqs, int node, void *arg)
312{
313 return -1;
314}
315
316static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
317{
318 return false;
319}
320#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
321
223#else /* CONFIG_IRQ_DOMAIN */ 322#else /* CONFIG_IRQ_DOMAIN */
224static inline void irq_dispose_mapping(unsigned int virq) { } 323static inline void irq_dispose_mapping(unsigned int virq) { }
324static inline void irq_domain_activate_irq(struct irq_data *data) { }
325static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
225#endif /* !CONFIG_IRQ_DOMAIN */ 326#endif /* !CONFIG_IRQ_DOMAIN */
226 327
227#endif /* _LINUX_IRQDOMAIN_H */ 328#endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
new file mode 100644
index 000000000000..62d543004197
--- /dev/null
+++ b/include/linux/irqhandler.h
@@ -0,0 +1,14 @@
1#ifndef _LINUX_IRQHANDLER_H
2#define _LINUX_IRQHANDLER_H
3
4/*
5 * Interrupt flow handler typedefs are defined here to avoid circular
6 * include dependencies.
7 */
8
9struct irq_desc;
10struct irq_data;
11typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
12typedef void (*irq_preflow_handler_t)(struct irq_data *data);
13
14#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0dae71e9971c..704b9a599b26 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1042,7 +1042,7 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1042extern void jbd2_journal_commit_transaction(journal_t *); 1042extern void jbd2_journal_commit_transaction(journal_t *);
1043 1043
1044/* Checkpoint list management */ 1044/* Checkpoint list management */
1045int __jbd2_journal_clean_checkpoint_list(journal_t *journal); 1045void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
1046int __jbd2_journal_remove_checkpoint(struct journal_head *); 1046int __jbd2_journal_remove_checkpoint(struct journal_head *);
1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); 1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
1048 1048
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index 866caaa9e2bb..c2ce155d83cc 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -22,4 +22,17 @@
22 */ 22 */
23#define KERN_CONT "" 23#define KERN_CONT ""
24 24
25/* integer equivalents of KERN_<LEVEL> */
26#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code
27 * are set to this special level */
28#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */
29#define LOGLEVEL_EMERG 0 /* system is unusable */
30#define LOGLEVEL_ALERT 1 /* action must be taken immediately */
31#define LOGLEVEL_CRIT 2 /* critical conditions */
32#define LOGLEVEL_ERR 3 /* error conditions */
33#define LOGLEVEL_WARNING 4 /* warning conditions */
34#define LOGLEVEL_NOTICE 5 /* normal but significant condition */
35#define LOGLEVEL_INFO 6 /* informational */
36#define LOGLEVEL_DEBUG 7 /* debug-level messages */
37
25#endif 38#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 40728cf1c452..233ea8107038 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -162,6 +162,7 @@ extern int _cond_resched(void);
162#endif 162#endif
163 163
164#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 164#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
165 void ___might_sleep(const char *file, int line, int preempt_offset);
165 void __might_sleep(const char *file, int line, int preempt_offset); 166 void __might_sleep(const char *file, int line, int preempt_offset);
166/** 167/**
167 * might_sleep - annotation for functions that can sleep 168 * might_sleep - annotation for functions that can sleep
@@ -175,10 +176,14 @@ extern int _cond_resched(void);
175 */ 176 */
176# define might_sleep() \ 177# define might_sleep() \
177 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) 178 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
179# define sched_annotate_sleep() __set_current_state(TASK_RUNNING)
178#else 180#else
181 static inline void ___might_sleep(const char *file, int line,
182 int preempt_offset) { }
179 static inline void __might_sleep(const char *file, int line, 183 static inline void __might_sleep(const char *file, int line,
180 int preempt_offset) { } 184 int preempt_offset) { }
181# define might_sleep() do { might_resched(); } while (0) 185# define might_sleep() do { might_resched(); } while (0)
186# define sched_annotate_sleep() do { } while (0)
182#endif 187#endif
183 188
184#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) 189#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
@@ -403,6 +408,7 @@ int vsscanf(const char *, const char *, va_list);
403extern int get_option(char **str, int *pint); 408extern int get_option(char **str, int *pint);
404extern char *get_options(const char *str, int nints, int *ints); 409extern char *get_options(const char *str, int nints, int *ints);
405extern unsigned long long memparse(const char *ptr, char **retptr); 410extern unsigned long long memparse(const char *ptr, char **retptr);
411extern bool parse_option_str(const char *str, const char *option);
406 412
407extern int core_kernel_text(unsigned long addr); 413extern int core_kernel_text(unsigned long addr);
408extern int core_kernel_data(unsigned long addr); 414extern int core_kernel_data(unsigned long addr);
@@ -421,6 +427,7 @@ extern int panic_timeout;
421extern int panic_on_oops; 427extern int panic_on_oops;
422extern int panic_on_unrecovered_nmi; 428extern int panic_on_unrecovered_nmi;
423extern int panic_on_io_nmi; 429extern int panic_on_io_nmi;
430extern int panic_on_warn;
424extern int sysctl_panic_on_stackoverflow; 431extern int sysctl_panic_on_stackoverflow;
425/* 432/*
426 * Only to be used by arch init code. If the user over-wrote the default 433 * Only to be used by arch init code. If the user over-wrote the default
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 8422b4ed6882..b9376cd5a187 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
77 return kstat_cpu(cpu).irqs_sum; 77 return kstat_cpu(cpu).irqs_sum;
78} 78}
79 79
80/*
81 * Lock/unlock the current runqueue - to extract task statistics:
82 */
83extern unsigned long long task_delta_exec(struct task_struct *);
84
85extern void account_user_time(struct task_struct *, cputime_t, cputime_t); 80extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
86extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); 81extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
87extern void account_steal_time(cputime_t); 82extern void account_steal_time(cputime_t);
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6b06d378f3df..e465bb15912d 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -283,7 +283,7 @@ struct kgdb_io {
283 283
284extern struct kgdb_arch arch_kgdb_ops; 284extern struct kgdb_arch arch_kgdb_ops;
285 285
286extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); 286extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
287 287
288#ifdef CONFIG_SERIAL_KGDB_NMI 288#ifdef CONFIG_SERIAL_KGDB_NMI
289extern int kgdb_register_nmi_console(void); 289extern int kgdb_register_nmi_console(void);
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 6b394f0b5148..eeb307985715 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -6,7 +6,8 @@
6#ifdef CONFIG_TRANSPARENT_HUGEPAGE 6#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7extern int __khugepaged_enter(struct mm_struct *mm); 7extern int __khugepaged_enter(struct mm_struct *mm);
8extern void __khugepaged_exit(struct mm_struct *mm); 8extern void __khugepaged_exit(struct mm_struct *mm);
9extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma); 9extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
10 unsigned long vm_flags);
10 11
11#define khugepaged_enabled() \ 12#define khugepaged_enabled() \
12 (transparent_hugepage_flags & \ 13 (transparent_hugepage_flags & \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
35 __khugepaged_exit(mm); 36 __khugepaged_exit(mm);
36} 37}
37 38
38static inline int khugepaged_enter(struct vm_area_struct *vma) 39static inline int khugepaged_enter(struct vm_area_struct *vma,
40 unsigned long vm_flags)
39{ 41{
40 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) 42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
41 if ((khugepaged_always() || 43 if ((khugepaged_always() ||
42 (khugepaged_req_madv() && 44 (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
43 vma->vm_flags & VM_HUGEPAGE)) && 45 !(vm_flags & VM_NOHUGEPAGE))
44 !(vma->vm_flags & VM_NOHUGEPAGE))
45 if (__khugepaged_enter(vma->vm_mm)) 46 if (__khugepaged_enter(vma->vm_mm))
46 return -ENOMEM; 47 return -ENOMEM;
47 return 0; 48 return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
54static inline void khugepaged_exit(struct mm_struct *mm) 55static inline void khugepaged_exit(struct mm_struct *mm)
55{ 56{
56} 57}
57static inline int khugepaged_enter(struct vm_area_struct *vma) 58static inline int khugepaged_enter(struct vm_area_struct *vma,
59 unsigned long vm_flags)
58{ 60{
59 return 0; 61 return 0;
60} 62}
61static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 63static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
64 unsigned long vm_flags)
62{ 65{
63 return 0; 66 return 0;
64} 67}
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 057e95971014..e705467ddb47 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -21,6 +21,8 @@
21#ifndef __KMEMLEAK_H 21#ifndef __KMEMLEAK_H
22#define __KMEMLEAK_H 22#define __KMEMLEAK_H
23 23
24#include <linux/slab.h>
25
24#ifdef CONFIG_DEBUG_KMEMLEAK 26#ifdef CONFIG_DEBUG_KMEMLEAK
25 27
26extern void kmemleak_init(void) __ref; 28extern void kmemleak_init(void) __ref;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index f7296e57d614..5297f9fa0ef2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
335extern int arch_prepare_kprobe_ftrace(struct kprobe *p); 335extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
336#endif 336#endif
337 337
338int arch_check_ftrace_location(struct kprobe *p);
338 339
339/* Get the kprobe at this addr (if any) - called with preemption disabled */ 340/* Get the kprobe at this addr (if any) - called with preemption disabled */
340struct kprobe *get_kprobe(void *addr); 341struct kprobe *get_kprobe(void *addr);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 28be31f49250..a6059bdf7b03 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
703int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 703int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
704void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 704void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
705 705
706bool kvm_is_mmio_pfn(pfn_t pfn); 706bool kvm_is_reserved_pfn(pfn_t pfn);
707 707
708struct kvm_irq_ack_notifier { 708struct kvm_irq_ack_notifier {
709 struct hlist_node link; 709 struct hlist_node link;
@@ -1080,6 +1080,7 @@ void kvm_device_get(struct kvm_device *dev);
1080void kvm_device_put(struct kvm_device *dev); 1080void kvm_device_put(struct kvm_device *dev);
1081struct kvm_device *kvm_device_from_filp(struct file *filp); 1081struct kvm_device *kvm_device_from_filp(struct file *filp);
1082int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); 1082int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
1083void kvm_unregister_device_ops(u32 type);
1083 1084
1084extern struct kvm_device_ops kvm_mpic_ops; 1085extern struct kvm_device_ops kvm_mpic_ops;
1085extern struct kvm_device_ops kvm_xics_ops; 1086extern struct kvm_device_ops kvm_xics_ops;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index e43686472197..361101fef270 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,8 +13,8 @@
13#define __LINUX_LEDS_H_INCLUDED 13#define __LINUX_LEDS_H_INCLUDED
14 14
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/spinlock.h>
17#include <linux/rwsem.h> 16#include <linux/rwsem.h>
17#include <linux/spinlock.h>
18#include <linux/timer.h> 18#include <linux/timer.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20 20
@@ -31,8 +31,8 @@ enum led_brightness {
31 31
32struct led_classdev { 32struct led_classdev {
33 const char *name; 33 const char *name;
34 int brightness; 34 enum led_brightness brightness;
35 int max_brightness; 35 enum led_brightness max_brightness;
36 int flags; 36 int flags;
37 37
38 /* Lower 16 bits reflect status */ 38 /* Lower 16 bits reflect status */
@@ -140,6 +140,16 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
140 */ 140 */
141extern void led_set_brightness(struct led_classdev *led_cdev, 141extern void led_set_brightness(struct led_classdev *led_cdev,
142 enum led_brightness brightness); 142 enum led_brightness brightness);
143/**
144 * led_update_brightness - update LED brightness
145 * @led_cdev: the LED to query
146 *
147 * Get an LED's current brightness and update led_cdev->brightness
148 * member with the obtained value.
149 *
150 * Returns: 0 on success or negative error value on failure
151 */
152extern int led_update_brightness(struct led_classdev *led_cdev);
143 153
144/* 154/*
145 * LED Triggers 155 * LED Triggers
@@ -251,6 +261,7 @@ struct gpio_led {
251 unsigned retain_state_suspended : 1; 261 unsigned retain_state_suspended : 1;
252 unsigned default_state : 2; 262 unsigned default_state : 2;
253 /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ 263 /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
264 struct gpio_desc *gpiod;
254}; 265};
255#define LEDS_GPIO_DEFSTATE_OFF 0 266#define LEDS_GPIO_DEFSTATE_OFF 0
256#define LEDS_GPIO_DEFSTATE_ON 1 267#define LEDS_GPIO_DEFSTATE_ON 1
@@ -263,7 +274,7 @@ struct gpio_led_platform_data {
263#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ 274#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
264#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ 275#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
265#define GPIO_LED_BLINK 2 /* Please, blink */ 276#define GPIO_LED_BLINK 2 /* Please, blink */
266 int (*gpio_blink_set)(unsigned gpio, int state, 277 int (*gpio_blink_set)(struct gpio_desc *desc, int state,
267 unsigned long *delay_on, 278 unsigned long *delay_on,
268 unsigned long *delay_off); 279 unsigned long *delay_off);
269}; 280};
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bd5fefeaf548..2d182413b1db 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -191,7 +191,8 @@ enum {
191 ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ 191 ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */
192 ATA_DEV_SEMB = 7, /* SEMB */ 192 ATA_DEV_SEMB = 7, /* SEMB */
193 ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ 193 ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
194 ATA_DEV_NONE = 9, /* no device */ 194 ATA_DEV_ZAC = 9, /* ZAC device */
195 ATA_DEV_NONE = 10, /* no device */
195 196
196 /* struct ata_link flags */ 197 /* struct ata_link flags */
197 ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ 198 ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
@@ -1191,9 +1192,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
1191extern int ata_scsi_slave_config(struct scsi_device *sdev); 1192extern int ata_scsi_slave_config(struct scsi_device *sdev);
1192extern void ata_scsi_slave_destroy(struct scsi_device *sdev); 1193extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
1193extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, 1194extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
1194 int queue_depth, int reason); 1195 int queue_depth);
1195extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, 1196extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
1196 int queue_depth, int reason); 1197 int queue_depth);
1197extern struct ata_device *ata_dev_pair(struct ata_device *adev); 1198extern struct ata_device *ata_dev_pair(struct ata_device *adev);
1198extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); 1199extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
1199extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); 1200extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
1491static inline unsigned int ata_class_enabled(unsigned int class) 1492static inline unsigned int ata_class_enabled(unsigned int class)
1492{ 1493{
1493 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || 1494 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
1494 class == ATA_DEV_PMP || class == ATA_DEV_SEMB; 1495 class == ATA_DEV_PMP || class == ATA_DEV_SEMB ||
1496 class == ATA_DEV_ZAC;
1495} 1497}
1496 1498
1497static inline unsigned int ata_class_disabled(unsigned int class) 1499static inline unsigned int ata_class_disabled(unsigned int class)
diff --git a/include/linux/list.h b/include/linux/list.h
index f33f831eb3c8..feb773c76ee0 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list,
346 * list_entry - get the struct for this entry 346 * list_entry - get the struct for this entry
347 * @ptr: the &struct list_head pointer. 347 * @ptr: the &struct list_head pointer.
348 * @type: the type of the struct this is embedded in. 348 * @type: the type of the struct this is embedded in.
349 * @member: the name of the list_struct within the struct. 349 * @member: the name of the list_head within the struct.
350 */ 350 */
351#define list_entry(ptr, type, member) \ 351#define list_entry(ptr, type, member) \
352 container_of(ptr, type, member) 352 container_of(ptr, type, member)
@@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list,
355 * list_first_entry - get the first element from a list 355 * list_first_entry - get the first element from a list
356 * @ptr: the list head to take the element from. 356 * @ptr: the list head to take the element from.
357 * @type: the type of the struct this is embedded in. 357 * @type: the type of the struct this is embedded in.
358 * @member: the name of the list_struct within the struct. 358 * @member: the name of the list_head within the struct.
359 * 359 *
360 * Note, that list is expected to be not empty. 360 * Note, that list is expected to be not empty.
361 */ 361 */
@@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list,
366 * list_last_entry - get the last element from a list 366 * list_last_entry - get the last element from a list
367 * @ptr: the list head to take the element from. 367 * @ptr: the list head to take the element from.
368 * @type: the type of the struct this is embedded in. 368 * @type: the type of the struct this is embedded in.
369 * @member: the name of the list_struct within the struct. 369 * @member: the name of the list_head within the struct.
370 * 370 *
371 * Note, that list is expected to be not empty. 371 * Note, that list is expected to be not empty.
372 */ 372 */
@@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list,
377 * list_first_entry_or_null - get the first element from a list 377 * list_first_entry_or_null - get the first element from a list
378 * @ptr: the list head to take the element from. 378 * @ptr: the list head to take the element from.
379 * @type: the type of the struct this is embedded in. 379 * @type: the type of the struct this is embedded in.
380 * @member: the name of the list_struct within the struct. 380 * @member: the name of the list_head within the struct.
381 * 381 *
382 * Note that if the list is empty, it returns NULL. 382 * Note that if the list is empty, it returns NULL.
383 */ 383 */
@@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list,
387/** 387/**
388 * list_next_entry - get the next element in list 388 * list_next_entry - get the next element in list
389 * @pos: the type * to cursor 389 * @pos: the type * to cursor
390 * @member: the name of the list_struct within the struct. 390 * @member: the name of the list_head within the struct.
391 */ 391 */
392#define list_next_entry(pos, member) \ 392#define list_next_entry(pos, member) \
393 list_entry((pos)->member.next, typeof(*(pos)), member) 393 list_entry((pos)->member.next, typeof(*(pos)), member)
@@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list,
395/** 395/**
396 * list_prev_entry - get the prev element in list 396 * list_prev_entry - get the prev element in list
397 * @pos: the type * to cursor 397 * @pos: the type * to cursor
398 * @member: the name of the list_struct within the struct. 398 * @member: the name of the list_head within the struct.
399 */ 399 */
400#define list_prev_entry(pos, member) \ 400#define list_prev_entry(pos, member) \
401 list_entry((pos)->member.prev, typeof(*(pos)), member) 401 list_entry((pos)->member.prev, typeof(*(pos)), member)
@@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list,
441 * list_for_each_entry - iterate over list of given type 441 * list_for_each_entry - iterate over list of given type
442 * @pos: the type * to use as a loop cursor. 442 * @pos: the type * to use as a loop cursor.
443 * @head: the head for your list. 443 * @head: the head for your list.
444 * @member: the name of the list_struct within the struct. 444 * @member: the name of the list_head within the struct.
445 */ 445 */
446#define list_for_each_entry(pos, head, member) \ 446#define list_for_each_entry(pos, head, member) \
447 for (pos = list_first_entry(head, typeof(*pos), member); \ 447 for (pos = list_first_entry(head, typeof(*pos), member); \
@@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list,
452 * list_for_each_entry_reverse - iterate backwards over list of given type. 452 * list_for_each_entry_reverse - iterate backwards over list of given type.
453 * @pos: the type * to use as a loop cursor. 453 * @pos: the type * to use as a loop cursor.
454 * @head: the head for your list. 454 * @head: the head for your list.
455 * @member: the name of the list_struct within the struct. 455 * @member: the name of the list_head within the struct.
456 */ 456 */
457#define list_for_each_entry_reverse(pos, head, member) \ 457#define list_for_each_entry_reverse(pos, head, member) \
458 for (pos = list_last_entry(head, typeof(*pos), member); \ 458 for (pos = list_last_entry(head, typeof(*pos), member); \
@@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list,
463 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() 463 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
464 * @pos: the type * to use as a start point 464 * @pos: the type * to use as a start point
465 * @head: the head of the list 465 * @head: the head of the list
466 * @member: the name of the list_struct within the struct. 466 * @member: the name of the list_head within the struct.
467 * 467 *
468 * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). 468 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
469 */ 469 */
@@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list,
474 * list_for_each_entry_continue - continue iteration over list of given type 474 * list_for_each_entry_continue - continue iteration over list of given type
475 * @pos: the type * to use as a loop cursor. 475 * @pos: the type * to use as a loop cursor.
476 * @head: the head for your list. 476 * @head: the head for your list.
477 * @member: the name of the list_struct within the struct. 477 * @member: the name of the list_head within the struct.
478 * 478 *
479 * Continue to iterate over list of given type, continuing after 479 * Continue to iterate over list of given type, continuing after
480 * the current position. 480 * the current position.
@@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list,
488 * list_for_each_entry_continue_reverse - iterate backwards from the given point 488 * list_for_each_entry_continue_reverse - iterate backwards from the given point
489 * @pos: the type * to use as a loop cursor. 489 * @pos: the type * to use as a loop cursor.
490 * @head: the head for your list. 490 * @head: the head for your list.
491 * @member: the name of the list_struct within the struct. 491 * @member: the name of the list_head within the struct.
492 * 492 *
493 * Start to iterate over list of given type backwards, continuing after 493 * Start to iterate over list of given type backwards, continuing after
494 * the current position. 494 * the current position.
@@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list,
502 * list_for_each_entry_from - iterate over list of given type from the current point 502 * list_for_each_entry_from - iterate over list of given type from the current point
503 * @pos: the type * to use as a loop cursor. 503 * @pos: the type * to use as a loop cursor.
504 * @head: the head for your list. 504 * @head: the head for your list.
505 * @member: the name of the list_struct within the struct. 505 * @member: the name of the list_head within the struct.
506 * 506 *
507 * Iterate over list of given type, continuing from current position. 507 * Iterate over list of given type, continuing from current position.
508 */ 508 */
@@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list,
515 * @pos: the type * to use as a loop cursor. 515 * @pos: the type * to use as a loop cursor.
516 * @n: another type * to use as temporary storage 516 * @n: another type * to use as temporary storage
517 * @head: the head for your list. 517 * @head: the head for your list.
518 * @member: the name of the list_struct within the struct. 518 * @member: the name of the list_head within the struct.
519 */ 519 */
520#define list_for_each_entry_safe(pos, n, head, member) \ 520#define list_for_each_entry_safe(pos, n, head, member) \
521 for (pos = list_first_entry(head, typeof(*pos), member), \ 521 for (pos = list_first_entry(head, typeof(*pos), member), \
@@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list,
528 * @pos: the type * to use as a loop cursor. 528 * @pos: the type * to use as a loop cursor.
529 * @n: another type * to use as temporary storage 529 * @n: another type * to use as temporary storage
530 * @head: the head for your list. 530 * @head: the head for your list.
531 * @member: the name of the list_struct within the struct. 531 * @member: the name of the list_head within the struct.
532 * 532 *
533 * Iterate over list of given type, continuing after current point, 533 * Iterate over list of given type, continuing after current point,
534 * safe against removal of list entry. 534 * safe against removal of list entry.
@@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list,
544 * @pos: the type * to use as a loop cursor. 544 * @pos: the type * to use as a loop cursor.
545 * @n: another type * to use as temporary storage 545 * @n: another type * to use as temporary storage
546 * @head: the head for your list. 546 * @head: the head for your list.
547 * @member: the name of the list_struct within the struct. 547 * @member: the name of the list_head within the struct.
548 * 548 *
549 * Iterate over list of given type from current point, safe against 549 * Iterate over list of given type from current point, safe against
550 * removal of list entry. 550 * removal of list entry.
@@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list,
559 * @pos: the type * to use as a loop cursor. 559 * @pos: the type * to use as a loop cursor.
560 * @n: another type * to use as temporary storage 560 * @n: another type * to use as temporary storage
561 * @head: the head for your list. 561 * @head: the head for your list.
562 * @member: the name of the list_struct within the struct. 562 * @member: the name of the list_head within the struct.
563 * 563 *
564 * Iterate backwards over list of given type, safe against removal 564 * Iterate backwards over list of given type, safe against removal
565 * of list entry. 565 * of list entry.
@@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list,
574 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop 574 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
575 * @pos: the loop cursor used in the list_for_each_entry_safe loop 575 * @pos: the loop cursor used in the list_for_each_entry_safe loop
576 * @n: temporary storage used in list_for_each_entry_safe 576 * @n: temporary storage used in list_for_each_entry_safe
577 * @member: the name of the list_struct within the struct. 577 * @member: the name of the list_head within the struct.
578 * 578 *
579 * list_safe_reset_next is not safe to use in general if the list may be 579 * list_safe_reset_next is not safe to use in general if the list may be
580 * modified concurrently (eg. the lock is dropped in the loop body). An 580 * modified concurrently (eg. the lock is dropped in the loop body). An
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 257d3779f2ab..0ca8109934e4 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -17,12 +17,8 @@
17 * Enable lockd debugging. 17 * Enable lockd debugging.
18 * Requires RPC_DEBUG. 18 * Requires RPC_DEBUG.
19 */ 19 */
20#ifdef RPC_DEBUG
21# define LOCKD_DEBUG 1
22#endif
23
24#undef ifdebug 20#undef ifdebug
25#if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) 21#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
26# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) 22# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag))
27#else 23#else
28# define ifdebug(flag) if (0) 24# define ifdebug(flag) if (0)
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
new file mode 100644
index 000000000000..1726ccbd8009
--- /dev/null
+++ b/include/linux/mailbox_client.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (C) 2013-2014 Linaro Ltd.
3 * Author: Jassi Brar <jassisinghbrar@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __MAILBOX_CLIENT_H
11#define __MAILBOX_CLIENT_H
12
13#include <linux/of.h>
14#include <linux/device.h>
15
16struct mbox_chan;
17
18/**
19 * struct mbox_client - User of a mailbox
20 * @dev: The client device
21 * @tx_block: If the mbox_send_message should block until data is
22 * transmitted.
23 * @tx_tout: Max block period in ms before TX is assumed failure
24 * @knows_txdone: If the client could run the TX state machine. Usually
25 * if the client receives some ACK packet for transmission.
26 * Unused if the controller already has TX_Done/RTR IRQ.
27 * @rx_callback: Atomic callback to provide client the data received
28 * @tx_prepare: Atomic callback to ask client to prepare the payload
29 * before initiating the transmission if required.
30 * @tx_done: Atomic callback to tell client of data transmission
31 */
32struct mbox_client {
33 struct device *dev;
34 bool tx_block;
35 unsigned long tx_tout;
36 bool knows_txdone;
37
38 void (*rx_callback)(struct mbox_client *cl, void *mssg);
39 void (*tx_prepare)(struct mbox_client *cl, void *mssg);
40 void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
41};
42
43struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
44int mbox_send_message(struct mbox_chan *chan, void *mssg);
45void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
46bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
47void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
48
49#endif /* __MAILBOX_CLIENT_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
new file mode 100644
index 000000000000..d4cf96f07cfc
--- /dev/null
+++ b/include/linux/mailbox_controller.h
@@ -0,0 +1,133 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#ifndef __MAILBOX_CONTROLLER_H
8#define __MAILBOX_CONTROLLER_H
9
10#include <linux/of.h>
11#include <linux/types.h>
12#include <linux/timer.h>
13#include <linux/device.h>
14#include <linux/completion.h>
15
16struct mbox_chan;
17
18/**
19 * struct mbox_chan_ops - methods to control mailbox channels
20 * @send_data: The API asks the MBOX controller driver, in atomic
21 * context try to transmit a message on the bus. Returns 0 if
22 * data is accepted for transmission, -EBUSY while rejecting
23 * if the remote hasn't yet read the last data sent. Actual
24 * transmission of data is reported by the controller via
25 * mbox_chan_txdone (if it has some TX ACK irq). It must not
26 * sleep.
27 * @startup: Called when a client requests the chan. The controller
28 * could ask clients for additional parameters of communication
29 * to be provided via client's chan_data. This call may
30 * block. After this call the Controller must forward any
31 * data received on the chan by calling mbox_chan_received_data.
32 * The controller may do stuff that need to sleep.
33 * @shutdown: Called when a client relinquishes control of a chan.
34 * This call may block too. The controller must not forward
35 * any received data anymore.
36 * The controller may do stuff that need to sleep.
37 * @last_tx_done: If the controller sets 'txdone_poll', the API calls
38 * this to poll status of last TX. The controller must
39 * give priority to IRQ method over polling and never
40 * set both txdone_poll and txdone_irq. Only in polling
41 * mode 'send_data' is expected to return -EBUSY.
42 * The controller may do stuff that need to sleep/block.
43 * Used only if txdone_poll:=true && txdone_irq:=false
44 * @peek_data: Atomic check for any received data. Return true if controller
45 * has some data to push to the client. False otherwise.
46 */
47struct mbox_chan_ops {
48 int (*send_data)(struct mbox_chan *chan, void *data);
49 int (*startup)(struct mbox_chan *chan);
50 void (*shutdown)(struct mbox_chan *chan);
51 bool (*last_tx_done)(struct mbox_chan *chan);
52 bool (*peek_data)(struct mbox_chan *chan);
53};
54
55/**
56 * struct mbox_controller - Controller of a class of communication channels
57 * @dev: Device backing this controller
58 * @ops: Operators that work on each communication chan
59 * @chans: Array of channels
60 * @num_chans: Number of channels in the 'chans' array.
61 * @txdone_irq: Indicates if the controller can report to API when
62 * the last transmitted data was read by the remote.
63 * Eg, if it has some TX ACK irq.
64 * @txdone_poll: If the controller can read but not report the TX
65 * done. Ex, some register shows the TX status but
66 * no interrupt rises. Ignored if 'txdone_irq' is set.
67 * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
68 * last TX's status after these many millisecs
69 * @of_xlate: Controller driver specific mapping of channel via DT
70 * @poll: API private. Used to poll for TXDONE on all channels.
71 * @node: API private. To hook into list of controllers.
72 */
73struct mbox_controller {
74 struct device *dev;
75 struct mbox_chan_ops *ops;
76 struct mbox_chan *chans;
77 int num_chans;
78 bool txdone_irq;
79 bool txdone_poll;
80 unsigned txpoll_period;
81 struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
82 const struct of_phandle_args *sp);
83 /* Internal to API */
84 struct timer_list poll;
85 struct list_head node;
86};
87
88/*
89 * The length of circular buffer for queuing messages from a client.
90 * 'msg_count' tracks the number of buffered messages while 'msg_free'
91 * is the index where the next message would be buffered.
92 * We shouldn't need it too big because every transfer is interrupt
93 * triggered and if we have lots of data to transfer, the interrupt
94 * latencies are going to be the bottleneck, not the buffer length.
95 * Besides, mbox_send_message could be called from atomic context and
96 * the client could also queue another message from the notifier 'tx_done'
97 * of the last transfer done.
98 * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
99 * print, it needs to be taken from config option or somesuch.
100 */
101#define MBOX_TX_QUEUE_LEN 20
102
103/**
104 * struct mbox_chan - s/w representation of a communication chan
105 * @mbox: Pointer to the parent/provider of this channel
106 * @txdone_method: Way to detect TXDone chosen by the API
107 * @cl: Pointer to the current owner of this channel
108 * @tx_complete: Transmission completion
109 * @active_req: Currently active request hook
110 * @msg_count: No. of mssg currently queued
111 * @msg_free: Index of next available mssg slot
112 * @msg_data: Hook for data packet
113 * @lock: Serialise access to the channel
114 * @con_priv: Hook for controller driver to attach private data
115 */
116struct mbox_chan {
117 struct mbox_controller *mbox;
118 unsigned txdone_method;
119 struct mbox_client *cl;
120 struct completion tx_complete;
121 void *active_req;
122 unsigned msg_count, msg_free;
123 void *msg_data[MBOX_TX_QUEUE_LEN];
124 spinlock_t lock; /* Serialise access to the channel */
125 void *con_priv;
126};
127
128int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */
129void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
130void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
131void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
132
133#endif /* __MAILBOX_CONTROLLER_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 8e9a029e093d..e6982ac3200d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
16#define MARVELL_PHY_ID_88E1318S 0x01410e90 16#define MARVELL_PHY_ID_88E1318S 0x01410e90
17#define MARVELL_PHY_ID_88E1116R 0x01410e40 17#define MARVELL_PHY_ID_88E1116R 0x01410e40
18#define MARVELL_PHY_ID_88E1510 0x01410dd0 18#define MARVELL_PHY_ID_88E1510 0x01410dd0
19#define MARVELL_PHY_ID_88E3016 0x01410e60
19 20
20/* struct phy_device dev_flags definitions */ 21/* struct phy_device dev_flags definitions */
21#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 22#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 550c88fb0267..611b69fa8594 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -61,6 +61,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
61} 61}
62#endif 62#endif
63 63
64int mvebu_mbus_save_cpu_target(u32 *store_addr);
64void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); 65void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
65void mvebu_mbus_get_pcie_io_aperture(struct resource *res); 66void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
66int mvebu_mbus_add_window_remap_by_id(unsigned int target, 67int mvebu_mbus_add_window_remap_by_id(unsigned int target,
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 19df5d857411..7c95af8d552c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,7 +25,6 @@
25#include <linux/jump_label.h> 25#include <linux/jump_label.h>
26 26
27struct mem_cgroup; 27struct mem_cgroup;
28struct page_cgroup;
29struct page; 28struct page;
30struct mm_struct; 29struct mm_struct;
31struct kmem_cache; 30struct kmem_cache;
@@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
68struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); 67struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
69struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); 68struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
70 69
71bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 70bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
72 struct mem_cgroup *memcg); 71 struct mem_cgroup *root);
73bool task_in_mem_cgroup(struct task_struct *task, 72bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
74 const struct mem_cgroup *memcg);
75 73
76extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 74extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
77extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 75extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
@@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
79extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); 77extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
80extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); 78extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
81 79
82static inline 80static inline bool mm_match_cgroup(struct mm_struct *mm,
83bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) 81 struct mem_cgroup *memcg)
84{ 82{
85 struct mem_cgroup *task_memcg; 83 struct mem_cgroup *task_memcg;
86 bool match; 84 bool match = false;
87 85
88 rcu_read_lock(); 86 rcu_read_lock();
89 task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 87 task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
90 match = __mem_cgroup_same_or_subtree(memcg, task_memcg); 88 if (task_memcg)
89 match = mem_cgroup_is_descendant(task_memcg, memcg);
91 rcu_read_unlock(); 90 rcu_read_unlock();
92 return match; 91 return match;
93} 92}
@@ -139,48 +138,23 @@ static inline bool mem_cgroup_disabled(void)
139 return false; 138 return false;
140} 139}
141 140
142void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked, 141struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
143 unsigned long *flags); 142 unsigned long *flags);
144 143void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
145extern atomic_t memcg_moving; 144 unsigned long *flags);
146 145void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
147static inline void mem_cgroup_begin_update_page_stat(struct page *page, 146 enum mem_cgroup_stat_index idx, int val);
148 bool *locked, unsigned long *flags)
149{
150 if (mem_cgroup_disabled())
151 return;
152 rcu_read_lock();
153 *locked = false;
154 if (atomic_read(&memcg_moving))
155 __mem_cgroup_begin_update_page_stat(page, locked, flags);
156}
157 147
158void __mem_cgroup_end_update_page_stat(struct page *page, 148static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
159 unsigned long *flags);
160static inline void mem_cgroup_end_update_page_stat(struct page *page,
161 bool *locked, unsigned long *flags)
162{
163 if (mem_cgroup_disabled())
164 return;
165 if (*locked)
166 __mem_cgroup_end_update_page_stat(page, flags);
167 rcu_read_unlock();
168}
169
170void mem_cgroup_update_page_stat(struct page *page,
171 enum mem_cgroup_stat_index idx,
172 int val);
173
174static inline void mem_cgroup_inc_page_stat(struct page *page,
175 enum mem_cgroup_stat_index idx) 149 enum mem_cgroup_stat_index idx)
176{ 150{
177 mem_cgroup_update_page_stat(page, idx, 1); 151 mem_cgroup_update_page_stat(memcg, idx, 1);
178} 152}
179 153
180static inline void mem_cgroup_dec_page_stat(struct page *page, 154static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
181 enum mem_cgroup_stat_index idx) 155 enum mem_cgroup_stat_index idx)
182{ 156{
183 mem_cgroup_update_page_stat(page, idx, -1); 157 mem_cgroup_update_page_stat(memcg, idx, -1);
184} 158}
185 159
186unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 160unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -199,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
199void mem_cgroup_split_huge_fixup(struct page *head); 173void mem_cgroup_split_huge_fixup(struct page *head);
200#endif 174#endif
201 175
202#ifdef CONFIG_DEBUG_VM
203bool mem_cgroup_bad_page_check(struct page *page);
204void mem_cgroup_print_bad_page(struct page *page);
205#endif
206#else /* CONFIG_MEMCG */ 176#else /* CONFIG_MEMCG */
207struct mem_cgroup; 177struct mem_cgroup;
208 178
@@ -315,12 +285,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
315{ 285{
316} 286}
317 287
318static inline void mem_cgroup_begin_update_page_stat(struct page *page, 288static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
319 bool *locked, unsigned long *flags) 289 bool *locked, unsigned long *flags)
320{ 290{
291 return NULL;
321} 292}
322 293
323static inline void mem_cgroup_end_update_page_stat(struct page *page, 294static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
324 bool *locked, unsigned long *flags) 295 bool *locked, unsigned long *flags)
325{ 296{
326} 297}
@@ -343,12 +314,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
343 return false; 314 return false;
344} 315}
345 316
346static inline void mem_cgroup_inc_page_stat(struct page *page, 317static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
347 enum mem_cgroup_stat_index idx) 318 enum mem_cgroup_stat_index idx)
348{ 319{
349} 320}
350 321
351static inline void mem_cgroup_dec_page_stat(struct page *page, 322static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
352 enum mem_cgroup_stat_index idx) 323 enum mem_cgroup_stat_index idx)
353{ 324{
354} 325}
@@ -371,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
371} 342}
372#endif /* CONFIG_MEMCG */ 343#endif /* CONFIG_MEMCG */
373 344
374#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
375static inline bool
376mem_cgroup_bad_page_check(struct page *page)
377{
378 return false;
379}
380
381static inline void
382mem_cgroup_print_bad_page(struct page *page)
383{
384}
385#endif
386
387enum { 345enum {
388 UNDER_LIMIT, 346 UNDER_LIMIT,
389 SOFT_LIMIT, 347 SOFT_LIMIT,
@@ -442,8 +400,8 @@ int memcg_cache_id(struct mem_cgroup *memcg);
442 400
443void memcg_update_array_size(int num_groups); 401void memcg_update_array_size(int num_groups);
444 402
445struct kmem_cache * 403struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
446__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); 404void __memcg_kmem_put_cache(struct kmem_cache *cachep);
447 405
448int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); 406int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
449void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); 407void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
@@ -471,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
471 /* 429 /*
472 * __GFP_NOFAIL allocations will move on even if charging is not 430 * __GFP_NOFAIL allocations will move on even if charging is not
473 * possible. Therefore we don't even try, and have this allocation 431 * possible. Therefore we don't even try, and have this allocation
474 * unaccounted. We could in theory charge it with 432 * unaccounted. We could in theory charge it forcibly, but we hope
475 * res_counter_charge_nofail, but we hope those allocations are rare, 433 * those allocations are rare, and won't be worth the trouble.
476 * and won't be worth the trouble.
477 */ 434 */
478 if (gfp & __GFP_NOFAIL) 435 if (gfp & __GFP_NOFAIL)
479 return true; 436 return true;
@@ -491,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
491 * memcg_kmem_uncharge_pages: uncharge pages from memcg 448 * memcg_kmem_uncharge_pages: uncharge pages from memcg
492 * @page: pointer to struct page being freed 449 * @page: pointer to struct page being freed
493 * @order: allocation order. 450 * @order: allocation order.
494 *
495 * there is no need to specify memcg here, since it is embedded in page_cgroup
496 */ 451 */
497static inline void 452static inline void
498memcg_kmem_uncharge_pages(struct page *page, int order) 453memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -509,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order)
509 * 464 *
510 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or 465 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
511 * failure of the allocation. if @page is NULL, this function will revert the 466 * failure of the allocation. if @page is NULL, this function will revert the
512 * charges. Otherwise, it will commit the memcg given by @memcg to the 467 * charges. Otherwise, it will commit @page to @memcg.
513 * corresponding page_cgroup.
514 */ 468 */
515static inline void 469static inline void
516memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) 470memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
@@ -538,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
538 if (unlikely(fatal_signal_pending(current))) 492 if (unlikely(fatal_signal_pending(current)))
539 return cachep; 493 return cachep;
540 494
541 return __memcg_kmem_get_cache(cachep, gfp); 495 return __memcg_kmem_get_cache(cachep);
496}
497
498static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
499{
500 if (memcg_kmem_enabled())
501 __memcg_kmem_put_cache(cachep);
542} 502}
543#else 503#else
544#define for_each_memcg_cache_index(_idx) \ 504#define for_each_memcg_cache_index(_idx) \
@@ -574,6 +534,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
574{ 534{
575 return cachep; 535 return cachep;
576} 536}
537
538static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
539{
540}
577#endif /* CONFIG_MEMCG_KMEM */ 541#endif /* CONFIG_MEMCG_KMEM */
578#endif /* _LINUX_MEMCONTROL_H */ 542#endif /* _LINUX_MEMCONTROL_H */
579 543
diff --git a/include/linux/memory.h b/include/linux/memory.h
index bb7384e3c3d8..8b8d8d12348e 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -35,7 +35,7 @@ struct memory_block {
35}; 35};
36 36
37int arch_get_memory_phys_device(unsigned long start_pfn); 37int arch_get_memory_phys_device(unsigned long start_pfn);
38unsigned long __weak memory_block_size_bytes(void); 38unsigned long memory_block_size_bytes(void);
39 39
40/* These states are exposed to userspace as text strings in sysfs */ 40/* These states are exposed to userspace as text strings in sysfs */
41#define MEM_ONLINE (1<<0) /* exposed to userspace */ 41#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index adba89d9c660..689312745b2f 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -12,7 +12,6 @@
12 12
13int ab8500_sysctrl_read(u16 reg, u8 *value); 13int ab8500_sysctrl_read(u16 reg, u8 *value);
14int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); 14int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value);
15void ab8500_restart(char mode, const char *cmd);
16 15
17#else 16#else
18 17
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index f34723f7663c..910e3aa1e965 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -141,6 +141,7 @@ struct arizona {
141 141
142 uint16_t dac_comp_coeff; 142 uint16_t dac_comp_coeff;
143 uint8_t dac_comp_enabled; 143 uint8_t dac_comp_enabled;
144 struct mutex dac_comp_lock;
144}; 145};
145 146
146int arizona_clk32k_enable(struct arizona *arizona); 147int arizona_clk32k_enable(struct arizona *arizona);
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index c0b075f6bc35..aacc10d7789c 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -125,6 +125,8 @@
125#define ARIZONA_MIC_BIAS_CTRL_1 0x218 125#define ARIZONA_MIC_BIAS_CTRL_1 0x218
126#define ARIZONA_MIC_BIAS_CTRL_2 0x219 126#define ARIZONA_MIC_BIAS_CTRL_2 0x219
127#define ARIZONA_MIC_BIAS_CTRL_3 0x21A 127#define ARIZONA_MIC_BIAS_CTRL_3 0x21A
128#define ARIZONA_HP_CTRL_1L 0x225
129#define ARIZONA_HP_CTRL_1R 0x226
128#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 130#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293
129#define ARIZONA_HEADPHONE_DETECT_1 0x29B 131#define ARIZONA_HEADPHONE_DETECT_1 0x29B
130#define ARIZONA_HEADPHONE_DETECT_2 0x29C 132#define ARIZONA_HEADPHONE_DETECT_2 0x29C
@@ -279,8 +281,16 @@
279#define ARIZONA_AIF2_FRAME_CTRL_2 0x548 281#define ARIZONA_AIF2_FRAME_CTRL_2 0x548
280#define ARIZONA_AIF2_FRAME_CTRL_3 0x549 282#define ARIZONA_AIF2_FRAME_CTRL_3 0x549
281#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A 283#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A
284#define ARIZONA_AIF2_FRAME_CTRL_5 0x54B
285#define ARIZONA_AIF2_FRAME_CTRL_6 0x54C
286#define ARIZONA_AIF2_FRAME_CTRL_7 0x54D
287#define ARIZONA_AIF2_FRAME_CTRL_8 0x54E
282#define ARIZONA_AIF2_FRAME_CTRL_11 0x551 288#define ARIZONA_AIF2_FRAME_CTRL_11 0x551
283#define ARIZONA_AIF2_FRAME_CTRL_12 0x552 289#define ARIZONA_AIF2_FRAME_CTRL_12 0x552
290#define ARIZONA_AIF2_FRAME_CTRL_13 0x553
291#define ARIZONA_AIF2_FRAME_CTRL_14 0x554
292#define ARIZONA_AIF2_FRAME_CTRL_15 0x555
293#define ARIZONA_AIF2_FRAME_CTRL_16 0x556
284#define ARIZONA_AIF2_TX_ENABLES 0x559 294#define ARIZONA_AIF2_TX_ENABLES 0x559
285#define ARIZONA_AIF2_RX_ENABLES 0x55A 295#define ARIZONA_AIF2_RX_ENABLES 0x55A
286#define ARIZONA_AIF2_FORCE_WRITE 0x55B 296#define ARIZONA_AIF2_FORCE_WRITE 0x55B
@@ -2245,6 +2255,46 @@
2245#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */ 2255#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */
2246 2256
2247/* 2257/*
2258 * R549 (0x225) - HP Ctrl 1L
2259 */
2260#define ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */
2261#define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */
2262#define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */
2263#define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */
2264#define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */
2265#define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */
2266#define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */
2267#define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */
2268#define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */
2269#define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */
2270#define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */
2271#define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */
2272#define ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */
2273#define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */
2274#define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */
2275#define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */
2276
2277/*
2278 * R550 (0x226) - HP Ctrl 1R
2279 */
2280#define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */
2281#define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */
2282#define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */
2283#define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */
2284#define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */
2285#define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */
2286#define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */
2287#define ARIZONA_HP1R_FLWR_WIDTH 1 /* HP1R_FLWR */
2288#define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */
2289#define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */
2290#define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */
2291#define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */
2292#define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */
2293#define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */
2294#define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */
2295#define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */
2296
2297/*
2248 * R659 (0x293) - Accessory Detect Mode 1 2298 * R659 (0x293) - Accessory Detect Mode 1
2249 */ 2299 */
2250#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */ 2300#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
new file mode 100644
index 000000000000..1279ab1644b5
--- /dev/null
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -0,0 +1,85 @@
1/*
2 * Copyright (C) 2014 Free Electrons
3 * Copyright (C) 2014 Atmel
4 *
5 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#ifndef __LINUX_MFD_HLCDC_H
21#define __LINUX_MFD_HLCDC_H
22
23#include <linux/clk.h>
24#include <linux/regmap.h>
25
26#define ATMEL_HLCDC_CFG(i) ((i) * 0x4)
27#define ATMEL_HLCDC_SIG_CFG LCDCFG(5)
28#define ATMEL_HLCDC_HSPOL BIT(0)
29#define ATMEL_HLCDC_VSPOL BIT(1)
30#define ATMEL_HLCDC_VSPDLYS BIT(2)
31#define ATMEL_HLCDC_VSPDLYE BIT(3)
32#define ATMEL_HLCDC_DISPPOL BIT(4)
33#define ATMEL_HLCDC_DITHER BIT(6)
34#define ATMEL_HLCDC_DISPDLY BIT(7)
35#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8)
36#define ATMEL_HLCDC_PP BIT(10)
37#define ATMEL_HLCDC_VSPSU BIT(12)
38#define ATMEL_HLCDC_VSPHO BIT(13)
39#define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16)
40
41#define ATMEL_HLCDC_EN 0x20
42#define ATMEL_HLCDC_DIS 0x24
43#define ATMEL_HLCDC_SR 0x28
44#define ATMEL_HLCDC_IER 0x2c
45#define ATMEL_HLCDC_IDR 0x30
46#define ATMEL_HLCDC_IMR 0x34
47#define ATMEL_HLCDC_ISR 0x38
48
49#define ATMEL_HLCDC_CLKPOL BIT(0)
50#define ATMEL_HLCDC_CLKSEL BIT(2)
51#define ATMEL_HLCDC_CLKPWMSEL BIT(3)
52#define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i))
53#define ATMEL_HLCDC_CLKDIV_SHFT 16
54#define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16)
55#define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT)
56
57#define ATMEL_HLCDC_PIXEL_CLK BIT(0)
58#define ATMEL_HLCDC_SYNC BIT(1)
59#define ATMEL_HLCDC_DISP BIT(2)
60#define ATMEL_HLCDC_PWM BIT(3)
61#define ATMEL_HLCDC_SIP BIT(4)
62
63#define ATMEL_HLCDC_SOF BIT(0)
64#define ATMEL_HLCDC_SYNCDIS BIT(1)
65#define ATMEL_HLCDC_FIFOERR BIT(4)
66#define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8)
67
68/**
69 * Structure shared by the MFD device and its subdevices.
70 *
71 * @regmap: register map used to access HLCDC IP registers
72 * @periph_clk: the hlcdc peripheral clock
73 * @sys_clk: the hlcdc system clock
74 * @slow_clk: the system slow clk
75 * @irq: the hlcdc irq
76 */
77struct atmel_hlcdc {
78 struct regmap *regmap;
79 struct clk *periph_clk;
80 struct clk *sys_clk;
81 struct clk *slow_clk;
82 int irq;
83};
84
85#endif /* __LINUX_MFD_HLCDC_H */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index d0e31a2287ac..81589d176ae8 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,8 @@
14enum { 14enum {
15 AXP202_ID = 0, 15 AXP202_ID = 0,
16 AXP209_ID, 16 AXP209_ID,
17 AXP288_ID,
18 NR_AXP20X_VARIANTS,
17}; 19};
18 20
19#define AXP20X_DATACACHE(m) (0x04 + (m)) 21#define AXP20X_DATACACHE(m) (0x04 + (m))
@@ -49,11 +51,13 @@ enum {
49#define AXP20X_IRQ3_EN 0x42 51#define AXP20X_IRQ3_EN 0x42
50#define AXP20X_IRQ4_EN 0x43 52#define AXP20X_IRQ4_EN 0x43
51#define AXP20X_IRQ5_EN 0x44 53#define AXP20X_IRQ5_EN 0x44
54#define AXP20X_IRQ6_EN 0x45
52#define AXP20X_IRQ1_STATE 0x48 55#define AXP20X_IRQ1_STATE 0x48
53#define AXP20X_IRQ2_STATE 0x49 56#define AXP20X_IRQ2_STATE 0x49
54#define AXP20X_IRQ3_STATE 0x4a 57#define AXP20X_IRQ3_STATE 0x4a
55#define AXP20X_IRQ4_STATE 0x4b 58#define AXP20X_IRQ4_STATE 0x4b
56#define AXP20X_IRQ5_STATE 0x4c 59#define AXP20X_IRQ5_STATE 0x4c
60#define AXP20X_IRQ6_STATE 0x4d
57 61
58/* ADC */ 62/* ADC */
59#define AXP20X_ACIN_V_ADC_H 0x56 63#define AXP20X_ACIN_V_ADC_H 0x56
@@ -116,6 +120,15 @@ enum {
116#define AXP20X_CC_CTRL 0xb8 120#define AXP20X_CC_CTRL 0xb8
117#define AXP20X_FG_RES 0xb9 121#define AXP20X_FG_RES 0xb9
118 122
123/* AXP288 specific registers */
124#define AXP288_PMIC_ADC_H 0x56
125#define AXP288_PMIC_ADC_L 0x57
126#define AXP288_ADC_TS_PIN_CTRL 0x84
127
128#define AXP288_PMIC_ADC_EN 0x84
129#define AXP288_FG_TUNE5 0xed
130
131
119/* Regulators IDs */ 132/* Regulators IDs */
120enum { 133enum {
121 AXP20X_LDO1 = 0, 134 AXP20X_LDO1 = 0,
@@ -169,12 +182,58 @@ enum {
169 AXP20X_IRQ_GPIO0_INPUT, 182 AXP20X_IRQ_GPIO0_INPUT,
170}; 183};
171 184
185enum axp288_irqs {
186 AXP288_IRQ_VBUS_FALL = 2,
187 AXP288_IRQ_VBUS_RISE,
188 AXP288_IRQ_OV,
189 AXP288_IRQ_FALLING_ALT,
190 AXP288_IRQ_RISING_ALT,
191 AXP288_IRQ_OV_ALT,
192 AXP288_IRQ_DONE = 10,
193 AXP288_IRQ_CHARGING,
194 AXP288_IRQ_SAFE_QUIT,
195 AXP288_IRQ_SAFE_ENTER,
196 AXP288_IRQ_ABSENT,
197 AXP288_IRQ_APPEND,
198 AXP288_IRQ_QWBTU,
199 AXP288_IRQ_WBTU,
200 AXP288_IRQ_QWBTO,
201 AXP288_IRQ_WBTO,
202 AXP288_IRQ_QCBTU,
203 AXP288_IRQ_CBTU,
204 AXP288_IRQ_QCBTO,
205 AXP288_IRQ_CBTO,
206 AXP288_IRQ_WL2,
207 AXP288_IRQ_WL1,
208 AXP288_IRQ_GPADC,
209 AXP288_IRQ_OT = 31,
210 AXP288_IRQ_GPIO0,
211 AXP288_IRQ_GPIO1,
212 AXP288_IRQ_POKO,
213 AXP288_IRQ_POKL,
214 AXP288_IRQ_POKS,
215 AXP288_IRQ_POKN,
216 AXP288_IRQ_POKP,
217 AXP288_IRQ_TIMER,
218 AXP288_IRQ_MV_CHNG,
219 AXP288_IRQ_BC_USB_CHNG,
220};
221
222#define AXP288_TS_ADC_H 0x58
223#define AXP288_TS_ADC_L 0x59
224#define AXP288_GP_ADC_H 0x5a
225#define AXP288_GP_ADC_L 0x5b
226
172struct axp20x_dev { 227struct axp20x_dev {
173 struct device *dev; 228 struct device *dev;
174 struct i2c_client *i2c_client; 229 struct i2c_client *i2c_client;
175 struct regmap *regmap; 230 struct regmap *regmap;
176 struct regmap_irq_chip_data *regmap_irqc; 231 struct regmap_irq_chip_data *regmap_irqc;
177 long variant; 232 long variant;
233 int nr_cells;
234 struct mfd_cell *cells;
235 const struct regmap_config *regmap_cfg;
236 const struct regmap_irq_chip *regmap_irq_chip;
178}; 237};
179 238
180#endif /* __LINUX_MFD_AXP20X_H */ 239#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 73e1709d4c09..a76bc100bf97 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -111,6 +111,13 @@ extern int mfd_add_devices(struct device *parent, int id,
111 struct resource *mem_base, 111 struct resource *mem_base,
112 int irq_base, struct irq_domain *irq_domain); 112 int irq_base, struct irq_domain *irq_domain);
113 113
114static inline int mfd_add_hotplug_devices(struct device *parent,
115 const struct mfd_cell *cells, int n_devs)
116{
117 return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs,
118 NULL, 0, NULL);
119}
120
114extern void mfd_remove_devices(struct device *parent); 121extern void mfd_remove_devices(struct device *parent);
115 122
116#endif 123#endif
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index cb01496bfa49..8e1cdbef3dad 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -99,12 +99,6 @@ struct davinci_vcif {
99 dma_addr_t dma_rx_addr; 99 dma_addr_t dma_rx_addr;
100}; 100};
101 101
102struct cq93vc {
103 struct platform_device *pdev;
104 struct snd_soc_codec *codec;
105 u32 sysclk;
106};
107
108struct davinci_vc; 102struct davinci_vc;
109 103
110struct davinci_vc { 104struct davinci_vc {
@@ -122,7 +116,6 @@ struct davinci_vc {
122 116
123 /* Client devices */ 117 /* Client devices */
124 struct davinci_vcif davinci_vcif; 118 struct davinci_vcif davinci_vcif;
125 struct cq93vc cq93vc;
126}; 119};
127 120
128#endif 121#endif
diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h
new file mode 100644
index 000000000000..004b24576da8
--- /dev/null
+++ b/include/linux/mfd/dln2.h
@@ -0,0 +1,103 @@
1#ifndef __LINUX_USB_DLN2_H
2#define __LINUX_USB_DLN2_H
3
4#define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8))
5
6struct dln2_platform_data {
7 u16 handle; /* sub-driver handle (internally used only) */
8 u8 port; /* I2C/SPI port */
9};
10
11/**
12 * dln2_event_cb_t - event callback function signature
13 *
14 * @pdev - the sub-device that registered this callback
15 * @echo - the echo header field received in the message
16 * @data - the data payload
17 * @len - the data payload length
18 *
19 * The callback function is called in interrupt context and the data payload is
20 * only valid during the call. If the user needs later access of the data, it
21 * must copy it.
22 */
23
24typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo,
25 const void *data, int len);
26
27/**
28 * dl2n_register_event_cb - register a callback function for an event
29 *
30 * @pdev - the sub-device that registers the callback
31 * @event - the event for which to register a callback
32 * @event_cb - the callback function
33 *
34 * @return 0 in case of success, negative value in case of error
35 */
36int dln2_register_event_cb(struct platform_device *pdev, u16 event,
37 dln2_event_cb_t event_cb);
38
39/**
40 * dln2_unregister_event_cb - unregister the callback function for an event
41 *
42 * @pdev - the sub-device that registered the callback
43 * @event - the event for which to register a callback
44 */
45void dln2_unregister_event_cb(struct platform_device *pdev, u16 event);
46
47/**
48 * dln2_transfer - issue a DLN2 command and wait for a response and the
49 * associated data
50 *
51 * @pdev - the sub-device which is issuing this transfer
52 * @cmd - the command to be sent to the device
53 * @obuf - the buffer to be sent to the device; it can be NULL if the user
54 * doesn't need to transmit data with this command
55 * @obuf_len - the size of the buffer to be sent to the device
56 * @ibuf - any data associated with the response will be copied here; it can be
57 * NULL if the user doesn't need the response data
58 * @ibuf_len - must be initialized to the input buffer size; it will be modified
59 * to indicate the actual data transferred;
60 *
61 * @return 0 for success, negative value for errors
62 */
63int dln2_transfer(struct platform_device *pdev, u16 cmd,
64 const void *obuf, unsigned obuf_len,
65 void *ibuf, unsigned *ibuf_len);
66
67/**
68 * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed
69 *
70 * @pdev - the sub-device which is issuing this transfer
71 * @cmd - the command to be sent to the device
72 * @ibuf - any data associated with the response will be copied here; it can be
73 * NULL if the user doesn't need the response data
74 * @ibuf_len - must be initialized to the input buffer size; it will be modified
75 * to indicate the actual data transferred;
76 *
77 * @return 0 for success, negative value for errors
78 */
79
80static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd,
81 void *ibuf, unsigned *ibuf_len)
82{
83 return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len);
84}
85
86/**
87 * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed
88 *
89 * @pdev - the sub-device which is issuing this transfer
90 * @cmd - the command to be sent to the device
91 * @obuf - the buffer to be sent to the device; it can be NULL if the
92 * user doesn't need to transmit data with this command
93 * @obuf_len - the size of the buffer to be sent to the device
94 *
95 * @return 0 for success, negative value for errors
96 */
97static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd,
98 const void *obuf, unsigned obuf_len)
99{
100 return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL);
101}
102
103#endif
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index 7e6dc4b2b795..553f7d09258a 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -131,13 +131,6 @@ enum max77686_opmode {
131 MAX77686_OPMODE_STANDBY, 131 MAX77686_OPMODE_STANDBY,
132}; 132};
133 133
134enum max77802_opmode {
135 MAX77802_OPMODE_OFF,
136 MAX77802_OPMODE_STANDBY,
137 MAX77802_OPMODE_LP,
138 MAX77802_OPMODE_NORMAL,
139};
140
141struct max77686_opmode_data { 134struct max77686_opmode_data {
142 int id; 135 int id;
143 int mode; 136 int mode;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index fc17d56581b2..08dae01258b9 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -26,7 +26,6 @@
26 26
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28 28
29#define MAX77693_NUM_IRQ_MUIC_REGS 3
30#define MAX77693_REG_INVALID (0xff) 29#define MAX77693_REG_INVALID (0xff)
31 30
32/* Slave addr = 0xCC: PMIC, Charger, Flash LED */ 31/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
@@ -330,6 +329,13 @@ enum max77693_irq_source {
330 MAX77693_IRQ_GROUP_NR, 329 MAX77693_IRQ_GROUP_NR,
331}; 330};
332 331
332#define SRC_IRQ_CHARGER BIT(0)
333#define SRC_IRQ_TOP BIT(1)
334#define SRC_IRQ_FLASH BIT(2)
335#define SRC_IRQ_MUIC BIT(3)
336#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
337 | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
338
333#define LED_IRQ_FLED2_OPEN BIT(0) 339#define LED_IRQ_FLED2_OPEN BIT(0)
334#define LED_IRQ_FLED2_SHORT BIT(1) 340#define LED_IRQ_FLED2_SHORT BIT(1)
335#define LED_IRQ_FLED1_OPEN BIT(2) 341#define LED_IRQ_FLED1_OPEN BIT(2)
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 74346d5e7899..0c12628e91c6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -558,6 +558,7 @@
558#define SD_SAMPLE_POINT_CTL 0xFDA7 558#define SD_SAMPLE_POINT_CTL 0xFDA7
559#define SD_PUSH_POINT_CTL 0xFDA8 559#define SD_PUSH_POINT_CTL 0xFDA8
560#define SD_CMD0 0xFDA9 560#define SD_CMD0 0xFDA9
561#define SD_CMD_START 0x40
561#define SD_CMD1 0xFDAA 562#define SD_CMD1 0xFDAA
562#define SD_CMD2 0xFDAB 563#define SD_CMD2 0xFDAB
563#define SD_CMD3 0xFDAC 564#define SD_CMD3 0xFDAC
@@ -707,6 +708,14 @@
707#define PM_CTRL1 0xFF44 708#define PM_CTRL1 0xFF44
708#define PM_CTRL2 0xFF45 709#define PM_CTRL2 0xFF45
709#define PM_CTRL3 0xFF46 710#define PM_CTRL3 0xFF46
711#define SDIO_SEND_PME_EN 0x80
712#define FORCE_RC_MODE_ON 0x40
713#define FORCE_RX50_LINK_ON 0x20
714#define D3_DELINK_MODE_EN 0x10
715#define USE_PESRTB_CTL_DELINK 0x08
716#define DELAY_PIN_WAKE 0x04
717#define RESET_PIN_WAKE 0x02
718#define PM_WAKE_EN 0x01
710#define PM_CTRL4 0xFF47 719#define PM_CTRL4 0xFF47
711 720
712/* Memory mapping */ 721/* Memory mapping */
@@ -752,6 +761,14 @@
752#define PHY_DUM_REG 0x1F 761#define PHY_DUM_REG 0x1F
753 762
754#define LCTLR 0x80 763#define LCTLR 0x80
764#define LCTLR_EXT_SYNC 0x80
765#define LCTLR_COMMON_CLOCK_CFG 0x40
766#define LCTLR_RETRAIN_LINK 0x20
767#define LCTLR_LINK_DISABLE 0x10
768#define LCTLR_RCB 0x08
769#define LCTLR_RESERVED 0x04
770#define LCTLR_ASPM_CTL_MASK 0x03
771
755#define PCR_SETTING_REG1 0x724 772#define PCR_SETTING_REG1 0x724
756#define PCR_SETTING_REG2 0x814 773#define PCR_SETTING_REG2 0x814
757#define PCR_SETTING_REG3 0x747 774#define PCR_SETTING_REG3 0x747
@@ -967,4 +984,24 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
967 return (u8 *)(pcr->host_cmds_ptr); 984 return (u8 *)(pcr->host_cmds_ptr);
968} 985}
969 986
987static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
988 u8 mask, u8 append)
989{
990 int err;
991 u8 val;
992
993 err = pci_read_config_byte(pcr->pci, addr, &val);
994 if (err < 0)
995 return err;
996 return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
997}
998
999static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
1000{
1001 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
1002 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16);
1003 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8);
1004 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
1005}
1006
970#endif 1007#endif
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 1825edacbda7..3fdb7cfbffb3 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -28,6 +28,7 @@
28#define MIN_800_MV 800000 28#define MIN_800_MV 800000
29#define MIN_750_MV 750000 29#define MIN_750_MV 750000
30#define MIN_600_MV 600000 30#define MIN_600_MV 600000
31#define MIN_500_MV 500000
31 32
32/* Macros to represent steps for LDO/BUCK */ 33/* Macros to represent steps for LDO/BUCK */
33#define STEP_50_MV 50000 34#define STEP_50_MV 50000
@@ -41,6 +42,7 @@ enum sec_device_type {
41 S5M8767X, 42 S5M8767X,
42 S2MPA01, 43 S2MPA01,
43 S2MPS11X, 44 S2MPS11X,
45 S2MPS13X,
44 S2MPS14X, 46 S2MPS14X,
45 S2MPU02, 47 S2MPU02,
46}; 48};
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
new file mode 100644
index 000000000000..ce5dda8958fe
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -0,0 +1,186 @@
1/*
2 * s2mps13.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#ifndef __LINUX_MFD_S2MPS13_H
20#define __LINUX_MFD_S2MPS13_H
21
22/* S2MPS13 registers */
23enum s2mps13_reg {
24 S2MPS13_REG_ID,
25 S2MPS13_REG_INT1,
26 S2MPS13_REG_INT2,
27 S2MPS13_REG_INT3,
28 S2MPS13_REG_INT1M,
29 S2MPS13_REG_INT2M,
30 S2MPS13_REG_INT3M,
31 S2MPS13_REG_ST1,
32 S2MPS13_REG_ST2,
33 S2MPS13_REG_PWRONSRC,
34 S2MPS13_REG_OFFSRC,
35 S2MPS13_REG_BU_CHG,
36 S2MPS13_REG_RTCCTRL,
37 S2MPS13_REG_CTRL1,
38 S2MPS13_REG_CTRL2,
39 S2MPS13_REG_RSVD1,
40 S2MPS13_REG_RSVD2,
41 S2MPS13_REG_RSVD3,
42 S2MPS13_REG_RSVD4,
43 S2MPS13_REG_RSVD5,
44 S2MPS13_REG_RSVD6,
45 S2MPS13_REG_CTRL3,
46 S2MPS13_REG_RSVD7,
47 S2MPS13_REG_RSVD8,
48 S2MPS13_REG_WRSTBI,
49 S2MPS13_REG_B1CTRL,
50 S2MPS13_REG_B1OUT,
51 S2MPS13_REG_B2CTRL,
52 S2MPS13_REG_B2OUT,
53 S2MPS13_REG_B3CTRL,
54 S2MPS13_REG_B3OUT,
55 S2MPS13_REG_B4CTRL,
56 S2MPS13_REG_B4OUT,
57 S2MPS13_REG_B5CTRL,
58 S2MPS13_REG_B5OUT,
59 S2MPS13_REG_B6CTRL,
60 S2MPS13_REG_B6OUT,
61 S2MPS13_REG_B7CTRL,
62 S2MPS13_REG_B7OUT,
63 S2MPS13_REG_B8CTRL,
64 S2MPS13_REG_B8OUT,
65 S2MPS13_REG_B9CTRL,
66 S2MPS13_REG_B9OUT,
67 S2MPS13_REG_B10CTRL,
68 S2MPS13_REG_B10OUT,
69 S2MPS13_REG_BB1CTRL,
70 S2MPS13_REG_BB1OUT,
71 S2MPS13_REG_BUCK_RAMP1,
72 S2MPS13_REG_BUCK_RAMP2,
73 S2MPS13_REG_LDO_DVS1,
74 S2MPS13_REG_LDO_DVS2,
75 S2MPS13_REG_LDO_DVS3,
76 S2MPS13_REG_B6OUT2,
77 S2MPS13_REG_L1CTRL,
78 S2MPS13_REG_L2CTRL,
79 S2MPS13_REG_L3CTRL,
80 S2MPS13_REG_L4CTRL,
81 S2MPS13_REG_L5CTRL,
82 S2MPS13_REG_L6CTRL,
83 S2MPS13_REG_L7CTRL,
84 S2MPS13_REG_L8CTRL,
85 S2MPS13_REG_L9CTRL,
86 S2MPS13_REG_L10CTRL,
87 S2MPS13_REG_L11CTRL,
88 S2MPS13_REG_L12CTRL,
89 S2MPS13_REG_L13CTRL,
90 S2MPS13_REG_L14CTRL,
91 S2MPS13_REG_L15CTRL,
92 S2MPS13_REG_L16CTRL,
93 S2MPS13_REG_L17CTRL,
94 S2MPS13_REG_L18CTRL,
95 S2MPS13_REG_L19CTRL,
96 S2MPS13_REG_L20CTRL,
97 S2MPS13_REG_L21CTRL,
98 S2MPS13_REG_L22CTRL,
99 S2MPS13_REG_L23CTRL,
100 S2MPS13_REG_L24CTRL,
101 S2MPS13_REG_L25CTRL,
102 S2MPS13_REG_L26CTRL,
103 S2MPS13_REG_L27CTRL,
104 S2MPS13_REG_L28CTRL,
105 S2MPS13_REG_L30CTRL,
106 S2MPS13_REG_L31CTRL,
107 S2MPS13_REG_L32CTRL,
108 S2MPS13_REG_L33CTRL,
109 S2MPS13_REG_L34CTRL,
110 S2MPS13_REG_L35CTRL,
111 S2MPS13_REG_L36CTRL,
112 S2MPS13_REG_L37CTRL,
113 S2MPS13_REG_L38CTRL,
114 S2MPS13_REG_L39CTRL,
115 S2MPS13_REG_L40CTRL,
116 S2MPS13_REG_LDODSCH1,
117 S2MPS13_REG_LDODSCH2,
118 S2MPS13_REG_LDODSCH3,
119 S2MPS13_REG_LDODSCH4,
120 S2MPS13_REG_LDODSCH5,
121};
122
123/* regulator ids */
124enum s2mps13_regulators {
125 S2MPS13_LDO1,
126 S2MPS13_LDO2,
127 S2MPS13_LDO3,
128 S2MPS13_LDO4,
129 S2MPS13_LDO5,
130 S2MPS13_LDO6,
131 S2MPS13_LDO7,
132 S2MPS13_LDO8,
133 S2MPS13_LDO9,
134 S2MPS13_LDO10,
135 S2MPS13_LDO11,
136 S2MPS13_LDO12,
137 S2MPS13_LDO13,
138 S2MPS13_LDO14,
139 S2MPS13_LDO15,
140 S2MPS13_LDO16,
141 S2MPS13_LDO17,
142 S2MPS13_LDO18,
143 S2MPS13_LDO19,
144 S2MPS13_LDO20,
145 S2MPS13_LDO21,
146 S2MPS13_LDO22,
147 S2MPS13_LDO23,
148 S2MPS13_LDO24,
149 S2MPS13_LDO25,
150 S2MPS13_LDO26,
151 S2MPS13_LDO27,
152 S2MPS13_LDO28,
153 S2MPS13_LDO29,
154 S2MPS13_LDO30,
155 S2MPS13_LDO31,
156 S2MPS13_LDO32,
157 S2MPS13_LDO33,
158 S2MPS13_LDO34,
159 S2MPS13_LDO35,
160 S2MPS13_LDO36,
161 S2MPS13_LDO37,
162 S2MPS13_LDO38,
163 S2MPS13_LDO39,
164 S2MPS13_LDO40,
165 S2MPS13_BUCK1,
166 S2MPS13_BUCK2,
167 S2MPS13_BUCK3,
168 S2MPS13_BUCK4,
169 S2MPS13_BUCK5,
170 S2MPS13_BUCK6,
171 S2MPS13_BUCK7,
172 S2MPS13_BUCK8,
173 S2MPS13_BUCK9,
174 S2MPS13_BUCK10,
175
176 S2MPS13_REGULATOR_MAX,
177};
178
179/*
180 * Default ramp delay in uv/us. Datasheet says that ramp delay can be
181 * controlled however it does not specify which register is used for that.
182 * Let's assume that default value will be set.
183 */
184#define S2MPS13_BUCK_RAMP_DELAY 12500
185
186#endif /* __LINUX_MFD_S2MPS13_H */
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index ff44374a1a4e..c877cad61a13 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -395,4 +395,43 @@
395#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17) 395#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
396#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14) 396#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
397 397
398/* For imx6sx iomux gpr register field define */
399#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20)
400#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20)
401#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20)
402#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19)
403#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19)
404#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19)
405#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13)
406#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
407#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
408
409#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
410#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
411
412#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3)
413#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3)
414#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3)
415
416#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27)
417#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27)
418#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27)
419#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27)
420#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27)
421#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26)
422#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26)
423#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26)
424#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4)
425#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4)
426#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4)
427#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4)
428#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4)
429
430#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2)
431#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2)
432#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2)
433#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1)
434#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
435#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
436
398#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ 437#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index e6088c2e2092..e1c12d84c26a 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -164,13 +164,10 @@ struct tc3589x_keypad_platform_data {
164 164
165/** 165/**
166 * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data 166 * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data
167 * @gpio_base: first gpio number assigned to TC3589x. A maximum of
168 * %TC3589x_NR_GPIOS GPIOs will be allocated.
169 * @setup: callback for board-specific initialization 167 * @setup: callback for board-specific initialization
170 * @remove: callback for board-specific teardown 168 * @remove: callback for board-specific teardown
171 */ 169 */
172struct tc3589x_gpio_platform_data { 170struct tc3589x_gpio_platform_data {
173 int gpio_base;
174 void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base); 171 void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base);
175 void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base); 172 void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base);
176}; 173};
@@ -178,18 +175,13 @@ struct tc3589x_gpio_platform_data {
178/** 175/**
179 * struct tc3589x_platform_data - TC3589x platform data 176 * struct tc3589x_platform_data - TC3589x platform data
180 * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) 177 * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
181 * @irq_base: base IRQ number. %TC3589x_NR_IRQS irqs will be used.
182 * @gpio: GPIO-specific platform data 178 * @gpio: GPIO-specific platform data
183 * @keypad: keypad-specific platform data 179 * @keypad: keypad-specific platform data
184 */ 180 */
185struct tc3589x_platform_data { 181struct tc3589x_platform_data {
186 unsigned int block; 182 unsigned int block;
187 int irq_base;
188 struct tc3589x_gpio_platform_data *gpio; 183 struct tc3589x_gpio_platform_data *gpio;
189 const struct tc3589x_keypad_platform_data *keypad; 184 const struct tc3589x_keypad_platform_data *keypad;
190}; 185};
191 186
192#define TC3589x_NR_GPIOS 24
193#define TC3589x_NR_IRQS TC3589x_INT_GPIO(TC3589x_NR_GPIOS)
194
195#endif 187#endif
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 53d33dee70e1..2e5b194b9b19 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -37,7 +37,6 @@
37 37
38/* struct phy_device dev_flags definitions */ 38/* struct phy_device dev_flags definitions */
39#define MICREL_PHY_50MHZ_CLK 0x00000001 39#define MICREL_PHY_50MHZ_CLK 0x00000001
40#define MICREL_PHY_25MHZ_CLK 0x00000002
41 40
42#define MICREL_KSZ9021_EXTREG_CTRL 0xB 41#define MICREL_KSZ9021_EXTREG_CTRL 0xB
43#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC 42#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 379c02648ab3..64d25941b329 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -67,6 +67,8 @@ enum {
67 MLX4_CMD_MAP_ICM_AUX = 0xffc, 67 MLX4_CMD_MAP_ICM_AUX = 0xffc,
68 MLX4_CMD_UNMAP_ICM_AUX = 0xffb, 68 MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
69 MLX4_CMD_SET_ICM_SIZE = 0xffd, 69 MLX4_CMD_SET_ICM_SIZE = 0xffd,
70 MLX4_CMD_ACCESS_REG = 0x3b,
71
70 /*master notify fw on finish for slave's flr*/ 72 /*master notify fw on finish for slave's flr*/
71 MLX4_CMD_INFORM_FLR_DONE = 0x5b, 73 MLX4_CMD_INFORM_FLR_DONE = 0x5b,
72 MLX4_CMD_GET_OP_REQ = 0x59, 74 MLX4_CMD_GET_OP_REQ = 0x59,
@@ -197,6 +199,33 @@ enum {
197 MLX4_CMD_NATIVE 199 MLX4_CMD_NATIVE
198}; 200};
199 201
202/*
203 * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
204 * Receive checksum value is reported in CQE also for non TCP/UDP packets.
205 *
206 * MLX4_RX_CSUM_MODE_L4 -
207 * L4_CSUM bit in CQE, which indicates whether or not L4 checksum
208 * was validated correctly, is supported.
209 *
210 * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
211 * IP_OK CQE's field is supported also for non TCP/UDP IP packets.
212 *
213 * MLX4_RX_CSUM_MODE_MULTI_VLAN -
214 * Receive Checksum offload is supported for packets with more than 2 vlan headers.
215 */
216enum mlx4_rx_csum_mode {
217 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0,
218 MLX4_RX_CSUM_MODE_L4 = 1UL << 1,
219 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2,
220 MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3
221};
222
223struct mlx4_config_dev_params {
224 u16 vxlan_udp_dport;
225 u8 rx_csum_flags_port_1;
226 u8 rx_csum_flags_port_2;
227};
228
200struct mlx4_dev; 229struct mlx4_dev;
201 230
202struct mlx4_cmd_mailbox { 231struct mlx4_cmd_mailbox {
@@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
248int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); 277int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
249int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); 278int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
250int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); 279int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
280int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
281 struct mlx4_config_dev_params *params);
251/* 282/*
252 * mlx4_get_slave_default_vlan - 283 * mlx4_get_slave_default_vlan -
253 * return true if VST ( default vlan) 284 * return true if VST ( default vlan)
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 37e4404d0227..25c791e295fd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -95,7 +95,7 @@ enum {
95 95
96enum { 96enum {
97 MLX4_MAX_NUM_PF = 16, 97 MLX4_MAX_NUM_PF = 16,
98 MLX4_MAX_NUM_VF = 64, 98 MLX4_MAX_NUM_VF = 126,
99 MLX4_MAX_NUM_VF_P_PORT = 64, 99 MLX4_MAX_NUM_VF_P_PORT = 64,
100 MLX4_MFUNC_MAX = 80, 100 MLX4_MFUNC_MAX = 80,
101 MLX4_MAX_EQ_NUM = 1024, 101 MLX4_MAX_EQ_NUM = 1024,
@@ -117,6 +117,14 @@ enum {
117 MLX4_STEERING_MODE_DEVICE_MANAGED 117 MLX4_STEERING_MODE_DEVICE_MANAGED
118}; 118};
119 119
120enum {
121 MLX4_STEERING_DMFS_A0_DEFAULT,
122 MLX4_STEERING_DMFS_A0_DYNAMIC,
123 MLX4_STEERING_DMFS_A0_STATIC,
124 MLX4_STEERING_DMFS_A0_DISABLE,
125 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
126};
127
120static inline const char *mlx4_steering_mode_str(int steering_mode) 128static inline const char *mlx4_steering_mode_str(int steering_mode)
121{ 129{
122 switch (steering_mode) { 130 switch (steering_mode) {
@@ -186,7 +194,31 @@ enum {
186 MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, 194 MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
187 MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, 195 MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
188 MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, 196 MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
189 MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 197 MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13,
198 MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14,
199 MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15,
200 MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
201 MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
202 MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
203 MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19
204};
205
206enum {
207 MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0,
208 MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
209};
210
211/* bit enums for an 8-bit flags field indicating special use
212 * QPs which require special handling in qp_reserve_range.
213 * Currently, this only includes QPs used by the ETH interface,
214 * where we expect to use blueflame. These QPs must not have
215 * bits 6 and 7 set in their qp number.
216 *
217 * This enum may use only bits 0..7.
218 */
219enum {
220 MLX4_RESERVE_A0_QP = 1 << 6,
221 MLX4_RESERVE_ETH_BF_QP = 1 << 7,
190}; 222};
191 223
192enum { 224enum {
@@ -202,7 +234,8 @@ enum {
202 234
203enum { 235enum {
204 MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, 236 MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
205 MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 237 MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
238 MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
206}; 239};
207 240
208 241
@@ -328,6 +361,8 @@ enum {
328 361
329enum mlx4_qp_region { 362enum mlx4_qp_region {
330 MLX4_QP_REGION_FW = 0, 363 MLX4_QP_REGION_FW = 0,
364 MLX4_QP_REGION_RSS_RAW_ETH,
365 MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH,
331 MLX4_QP_REGION_ETH_ADDR, 366 MLX4_QP_REGION_ETH_ADDR,
332 MLX4_QP_REGION_FC_ADDR, 367 MLX4_QP_REGION_FC_ADDR,
333 MLX4_QP_REGION_FC_EXCH, 368 MLX4_QP_REGION_FC_EXCH,
@@ -379,6 +414,13 @@ enum {
379#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ 414#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
380 MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) 415 MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
381 416
417enum mlx4_module_id {
418 MLX4_MODULE_ID_SFP = 0x3,
419 MLX4_MODULE_ID_QSFP = 0xC,
420 MLX4_MODULE_ID_QSFP_PLUS = 0xD,
421 MLX4_MODULE_ID_QSFP28 = 0x11,
422};
423
382static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) 424static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
383{ 425{
384 return (major << 32) | (minor << 16) | subminor; 426 return (major << 32) | (minor << 16) | subminor;
@@ -433,6 +475,7 @@ struct mlx4_caps {
433 int num_cqs; 475 int num_cqs;
434 int max_cqes; 476 int max_cqes;
435 int reserved_cqs; 477 int reserved_cqs;
478 int num_sys_eqs;
436 int num_eqs; 479 int num_eqs;
437 int reserved_eqs; 480 int reserved_eqs;
438 int num_comp_vectors; 481 int num_comp_vectors;
@@ -449,6 +492,7 @@ struct mlx4_caps {
449 int reserved_mcgs; 492 int reserved_mcgs;
450 int num_qp_per_mgm; 493 int num_qp_per_mgm;
451 int steering_mode; 494 int steering_mode;
495 int dmfs_high_steer_mode;
452 int fs_log_max_ucast_qp_range_size; 496 int fs_log_max_ucast_qp_range_size;
453 int num_pds; 497 int num_pds;
454 int reserved_pds; 498 int reserved_pds;
@@ -487,6 +531,10 @@ struct mlx4_caps {
487 u16 hca_core_clock; 531 u16 hca_core_clock;
488 u64 phys_port_id[MLX4_MAX_PORTS + 1]; 532 u64 phys_port_id[MLX4_MAX_PORTS + 1];
489 int tunnel_offload_mode; 533 int tunnel_offload_mode;
534 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
535 u8 alloc_res_qp_mask;
536 u32 dmfs_high_rate_qpn_base;
537 u32 dmfs_high_rate_qpn_range;
490}; 538};
491 539
492struct mlx4_buf_list { 540struct mlx4_buf_list {
@@ -607,6 +655,11 @@ struct mlx4_cq {
607 655
608 atomic_t refcount; 656 atomic_t refcount;
609 struct completion free; 657 struct completion free;
658 struct {
659 struct list_head list;
660 void (*comp)(struct mlx4_cq *);
661 void *priv;
662 } tasklet_ctx;
610}; 663};
611 664
612struct mlx4_qp { 665struct mlx4_qp {
@@ -799,6 +852,26 @@ struct mlx4_init_port_param {
799 u64 si_guid; 852 u64 si_guid;
800}; 853};
801 854
855#define MAD_IFC_DATA_SZ 192
856/* MAD IFC Mailbox */
857struct mlx4_mad_ifc {
858 u8 base_version;
859 u8 mgmt_class;
860 u8 class_version;
861 u8 method;
862 __be16 status;
863 __be16 class_specific;
864 __be64 tid;
865 __be16 attr_id;
866 __be16 resv;
867 __be32 attr_mod;
868 __be64 mkey;
869 __be16 dr_slid;
870 __be16 dr_dlid;
871 u8 reserved[28];
872 u8 data[MAD_IFC_DATA_SZ];
873} __packed;
874
802#define mlx4_foreach_port(port, dev, type) \ 875#define mlx4_foreach_port(port, dev, type) \
803 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ 876 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
804 if ((type) == (dev)->caps.port_mask[(port)]) 877 if ((type) == (dev)->caps.port_mask[(port)])
@@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
835static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) 908static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
836{ 909{
837 return (qpn < dev->phys_caps.base_sqpn + 8 + 910 return (qpn < dev->phys_caps.base_sqpn + 8 +
838 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); 911 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) &&
912 qpn >= dev->phys_caps.base_sqpn) ||
913 (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]);
839} 914}
840 915
841static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) 916static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
@@ -911,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
911 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, 986 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
912 unsigned vector, int collapsed, int timestamp_en); 987 unsigned vector, int collapsed, int timestamp_en);
913void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); 988void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
914 989int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
915int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); 990 int *base, u8 flags);
916void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); 991void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
917 992
918int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, 993int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
@@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
1283 u64 iova, u64 size, int npages, 1358 u64 iova, u64 size, int npages,
1284 int page_shift, struct mlx4_mpt_entry *mpt_entry); 1359 int page_shift, struct mlx4_mpt_entry *mpt_entry);
1285 1360
1361int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1362 u16 offset, u16 size, u8 *data);
1363
1286/* Returns true if running in low memory profile (kdump kernel) */ 1364/* Returns true if running in low memory profile (kdump kernel) */
1287static inline bool mlx4_low_memory_profile(void) 1365static inline bool mlx4_low_memory_profile(void)
1288{ 1366{
1289 return is_kdump_kernel(); 1367 return is_kdump_kernel();
1290} 1368}
1291 1369
1370/* ACCESS REG commands */
1371enum mlx4_access_reg_method {
1372 MLX4_ACCESS_REG_QUERY = 0x1,
1373 MLX4_ACCESS_REG_WRITE = 0x2,
1374};
1375
1376/* ACCESS PTYS Reg command */
1377enum mlx4_ptys_proto {
1378 MLX4_PTYS_IB = 1<<0,
1379 MLX4_PTYS_EN = 1<<2,
1380};
1381
1382struct mlx4_ptys_reg {
1383 u8 resrvd1;
1384 u8 local_port;
1385 u8 resrvd2;
1386 u8 proto_mask;
1387 __be32 resrvd3[2];
1388 __be32 eth_proto_cap;
1389 __be16 ib_width_cap;
1390 __be16 ib_speed_cap;
1391 __be32 resrvd4;
1392 __be32 eth_proto_admin;
1393 __be16 ib_width_admin;
1394 __be16 ib_speed_admin;
1395 __be32 resrvd5;
1396 __be32 eth_proto_oper;
1397 __be16 ib_width_oper;
1398 __be16 ib_speed_oper;
1399 __be32 resrvd6;
1400 __be32 eth_proto_lp_adv;
1401} __packed;
1402
1403int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1404 enum mlx4_access_reg_method method,
1405 struct mlx4_ptys_reg *ptys_reg);
1406
1292#endif /* MLX4_DEVICE_H */ 1407#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 5f4e36cf0091..467ccdf94c98 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -120,13 +120,15 @@ enum {
120 MLX4_RSS_QPC_FLAG_OFFSET = 13, 120 MLX4_RSS_QPC_FLAG_OFFSET = 13,
121}; 121};
122 122
123#define MLX4_EN_RSS_KEY_SIZE 40
124
123struct mlx4_rss_context { 125struct mlx4_rss_context {
124 __be32 base_qpn; 126 __be32 base_qpn;
125 __be32 default_qpn; 127 __be32 default_qpn;
126 u16 reserved; 128 u16 reserved;
127 u8 hash_fn; 129 u8 hash_fn;
128 u8 flags; 130 u8 flags;
129 __be32 rss_key[10]; 131 __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)];
130 __be32 base_qpn_udp; 132 __be32 base_qpn_udp;
131}; 133};
132 134
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 1d67fd32e71c..ea4f1c46f761 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -219,23 +219,15 @@ enum {
219}; 219};
220 220
221enum { 221enum {
222 MLX5_DEV_CAP_FLAG_RC = 1LL << 0,
223 MLX5_DEV_CAP_FLAG_UC = 1LL << 1,
224 MLX5_DEV_CAP_FLAG_UD = 1LL << 2,
225 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, 222 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
226 MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6,
227 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, 223 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
228 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, 224 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
229 MLX5_DEV_CAP_FLAG_APM = 1LL << 17, 225 MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
230 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, 226 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
231 MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, 227 MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
232 MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
233 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, 228 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
234 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, 229 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
235 MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
236 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, 230 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
237 MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
238 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
239 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, 231 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
240 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, 232 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
241}; 233};
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 246310dc8bef..b1bf41556b32 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -633,14 +633,6 @@ static inline void *mlx5_vzalloc(unsigned long size)
633 return rtn; 633 return rtn;
634} 634}
635 635
636static inline void mlx5_vfree(const void *addr)
637{
638 if (addr && is_vmalloc_addr(addr))
639 vfree(addr);
640 else
641 kfree(addr);
642}
643
644static inline u32 mlx5_base_mkey(const u32 key) 636static inline u32 mlx5_base_mkey(const u32 key)
645{ 637{
646 return key & 0xffffff00u; 638 return key & 0xffffff00u;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 02d11ee7f19d..c0a67b894c4c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,6 +19,7 @@
19#include <linux/bit_spinlock.h> 19#include <linux/bit_spinlock.h>
20#include <linux/shrinker.h> 20#include <linux/shrinker.h>
21#include <linux/resource.h> 21#include <linux/resource.h>
22#include <linux/page_ext.h>
22 23
23struct mempolicy; 24struct mempolicy;
24struct anon_vma; 25struct anon_vma;
@@ -56,6 +57,17 @@ extern int sysctl_legacy_va_layout;
56#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 57#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
57#endif 58#endif
58 59
60/*
61 * To prevent common memory management code establishing
62 * a zero page mapping on a read fault.
63 * This macro should be defined within <asm/pgtable.h>.
64 * s390 does this to prevent multiplexing of hardware bits
65 * related to the physical page in case of virtualization.
66 */
67#ifndef mm_forbids_zeropage
68#define mm_forbids_zeropage(X) (0)
69#endif
70
59extern unsigned long sysctl_user_reserve_kbytes; 71extern unsigned long sysctl_user_reserve_kbytes;
60extern unsigned long sysctl_admin_reserve_kbytes; 72extern unsigned long sysctl_admin_reserve_kbytes;
61 73
@@ -128,6 +140,7 @@ extern unsigned int kobjsize(const void *objp);
128#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 140#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
129#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ 141#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
130#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ 142#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
143#define VM_ARCH_2 0x02000000
131#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ 144#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
132 145
133#ifdef CONFIG_MEM_SOFT_DIRTY 146#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -155,6 +168,11 @@ extern unsigned int kobjsize(const void *objp);
155# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ 168# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
156#endif 169#endif
157 170
171#if defined(CONFIG_X86)
172/* MPX specific bounds table or bounds directory */
173# define VM_MPX VM_ARCH_2
174#endif
175
158#ifndef VM_GROWSUP 176#ifndef VM_GROWSUP
159# define VM_GROWSUP VM_NONE 177# define VM_GROWSUP VM_NONE
160#endif 178#endif
@@ -1176,6 +1194,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
1176 1194
1177extern void truncate_pagecache(struct inode *inode, loff_t new); 1195extern void truncate_pagecache(struct inode *inode, loff_t new);
1178extern void truncate_setsize(struct inode *inode, loff_t newsize); 1196extern void truncate_setsize(struct inode *inode, loff_t newsize);
1197void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1179void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); 1198void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1180int truncate_inode_page(struct address_space *mapping, struct page *page); 1199int truncate_inode_page(struct address_space *mapping, struct page *page);
1181int generic_error_remove_page(struct address_space *mapping, struct page *page); 1200int generic_error_remove_page(struct address_space *mapping, struct page *page);
@@ -1234,7 +1253,6 @@ int __set_page_dirty_no_writeback(struct page *page);
1234int redirty_page_for_writepage(struct writeback_control *wbc, 1253int redirty_page_for_writepage(struct writeback_control *wbc,
1235 struct page *page); 1254 struct page *page);
1236void account_page_dirtied(struct page *page, struct address_space *mapping); 1255void account_page_dirtied(struct page *page, struct address_space *mapping);
1237void account_page_writeback(struct page *page);
1238int set_page_dirty(struct page *page); 1256int set_page_dirty(struct page *page);
1239int set_page_dirty_lock(struct page *page); 1257int set_page_dirty_lock(struct page *page);
1240int clear_page_dirty_for_io(struct page *page); 1258int clear_page_dirty_for_io(struct page *page);
@@ -2043,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm,
2043#endif /* CONFIG_PROC_FS */ 2061#endif /* CONFIG_PROC_FS */
2044 2062
2045#ifdef CONFIG_DEBUG_PAGEALLOC 2063#ifdef CONFIG_DEBUG_PAGEALLOC
2046extern void kernel_map_pages(struct page *page, int numpages, int enable); 2064extern bool _debug_pagealloc_enabled;
2065extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2066
2067static inline bool debug_pagealloc_enabled(void)
2068{
2069 return _debug_pagealloc_enabled;
2070}
2071
2072static inline void
2073kernel_map_pages(struct page *page, int numpages, int enable)
2074{
2075 if (!debug_pagealloc_enabled())
2076 return;
2077
2078 __kernel_map_pages(page, numpages, enable);
2079}
2047#ifdef CONFIG_HIBERNATION 2080#ifdef CONFIG_HIBERNATION
2048extern bool kernel_page_present(struct page *page); 2081extern bool kernel_page_present(struct page *page);
2049#endif /* CONFIG_HIBERNATION */ 2082#endif /* CONFIG_HIBERNATION */
@@ -2077,9 +2110,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
2077 void __user *, size_t *, loff_t *); 2110 void __user *, size_t *, loff_t *);
2078#endif 2111#endif
2079 2112
2080unsigned long shrink_slab(struct shrink_control *shrink, 2113unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
2081 unsigned long nr_pages_scanned, 2114 unsigned long nr_scanned,
2082 unsigned long lru_pages); 2115 unsigned long nr_eligible);
2083 2116
2084#ifndef CONFIG_MMU 2117#ifndef CONFIG_MMU
2085#define randomize_va_space 0 2118#define randomize_va_space 0
@@ -2138,20 +2171,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
2138 unsigned int pages_per_huge_page); 2171 unsigned int pages_per_huge_page);
2139#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 2172#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2140 2173
2174extern struct page_ext_operations debug_guardpage_ops;
2175extern struct page_ext_operations page_poisoning_ops;
2176
2141#ifdef CONFIG_DEBUG_PAGEALLOC 2177#ifdef CONFIG_DEBUG_PAGEALLOC
2142extern unsigned int _debug_guardpage_minorder; 2178extern unsigned int _debug_guardpage_minorder;
2179extern bool _debug_guardpage_enabled;
2143 2180
2144static inline unsigned int debug_guardpage_minorder(void) 2181static inline unsigned int debug_guardpage_minorder(void)
2145{ 2182{
2146 return _debug_guardpage_minorder; 2183 return _debug_guardpage_minorder;
2147} 2184}
2148 2185
2186static inline bool debug_guardpage_enabled(void)
2187{
2188 return _debug_guardpage_enabled;
2189}
2190
2149static inline bool page_is_guard(struct page *page) 2191static inline bool page_is_guard(struct page *page)
2150{ 2192{
2151 return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); 2193 struct page_ext *page_ext;
2194
2195 if (!debug_guardpage_enabled())
2196 return false;
2197
2198 page_ext = lookup_page_ext(page);
2199 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2152} 2200}
2153#else 2201#else
2154static inline unsigned int debug_guardpage_minorder(void) { return 0; } 2202static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2203static inline bool debug_guardpage_enabled(void) { return false; }
2155static inline bool page_is_guard(struct page *page) { return false; } 2204static inline bool page_is_guard(struct page *page) { return false; }
2156#endif /* CONFIG_DEBUG_PAGEALLOC */ 2205#endif /* CONFIG_DEBUG_PAGEALLOC */
2157 2206
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6e0b286649f1..6d34aa266a8c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -10,7 +10,6 @@
10#include <linux/rwsem.h> 10#include <linux/rwsem.h>
11#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/page-debug-flags.h>
14#include <linux/uprobes.h> 13#include <linux/uprobes.h>
15#include <linux/page-flags-layout.h> 14#include <linux/page-flags-layout.h>
16#include <asm/page.h> 15#include <asm/page.h>
@@ -22,6 +21,7 @@
22#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) 21#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
23 22
24struct address_space; 23struct address_space;
24struct mem_cgroup;
25 25
26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ 27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
@@ -167,6 +167,10 @@ struct page {
167 struct page *first_page; /* Compound tail pages */ 167 struct page *first_page; /* Compound tail pages */
168 }; 168 };
169 169
170#ifdef CONFIG_MEMCG
171 struct mem_cgroup *mem_cgroup;
172#endif
173
170 /* 174 /*
171 * On machines where all RAM is mapped into kernel address space, 175 * On machines where all RAM is mapped into kernel address space,
172 * we can simply calculate the virtual address. On machines with 176 * we can simply calculate the virtual address. On machines with
@@ -181,9 +185,6 @@ struct page {
181 void *virtual; /* Kernel virtual address (NULL if 185 void *virtual; /* Kernel virtual address (NULL if
182 not kmapped, ie. highmem) */ 186 not kmapped, ie. highmem) */
183#endif /* WANT_PAGE_VIRTUAL */ 187#endif /* WANT_PAGE_VIRTUAL */
184#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
185 unsigned long debug_flags; /* Use atomic bitops on this */
186#endif
187 188
188#ifdef CONFIG_KMEMCHECK 189#ifdef CONFIG_KMEMCHECK
189 /* 190 /*
@@ -454,6 +455,10 @@ struct mm_struct {
454 bool tlb_flush_pending; 455 bool tlb_flush_pending;
455#endif 456#endif
456 struct uprobes_state uprobes_state; 457 struct uprobes_state uprobes_state;
458#ifdef CONFIG_X86_INTEL_MPX
459 /* address of the bounds directory */
460 void __user *bd_addr;
461#endif
457}; 462};
458 463
459static inline void mm_init_cpumask(struct mm_struct *mm) 464static inline void mm_init_cpumask(struct mm_struct *mm)
@@ -525,4 +530,12 @@ enum tlb_flush_reason {
525 NR_TLB_FLUSH_REASONS, 530 NR_TLB_FLUSH_REASONS,
526}; 531};
527 532
533 /*
534 * A swap entry has to fit into a "unsigned long", as the entry is hidden
535 * in the "index" field of the swapper address space.
536 */
537typedef struct {
538 unsigned long val;
539} swp_entry_t;
540
528#endif /* _LINUX_MM_TYPES_H */ 541#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index b0692d28f8e6..4d69c00497bd 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -88,6 +88,9 @@ struct mmc_ext_csd {
88 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ 88 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
89 unsigned int boot_ro_lock; /* ro lock support */ 89 unsigned int boot_ro_lock; /* ro lock support */
90 bool boot_ro_lockable; 90 bool boot_ro_lockable;
91 bool ffu_capable; /* Firmware upgrade support */
92#define MMC_FIRMWARE_LEN 8
93 u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
91 u8 raw_exception_status; /* 54 */ 94 u8 raw_exception_status; /* 54 */
92 u8 raw_partition_support; /* 160 */ 95 u8 raw_partition_support; /* 160 */
93 u8 raw_rpmb_size_mult; /* 168 */ 96 u8 raw_rpmb_size_mult; /* 168 */
@@ -509,24 +512,8 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
509 512
510#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) 513#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
511 514
512#define mmc_list_to_card(l) container_of(l, struct mmc_card, node) 515extern int mmc_register_driver(struct device_driver *);
513#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) 516extern void mmc_unregister_driver(struct device_driver *);
514#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d)
515
516/*
517 * MMC device driver (e.g., Flash card, I/O card...)
518 */
519struct mmc_driver {
520 struct device_driver drv;
521 int (*probe)(struct mmc_card *);
522 void (*remove)(struct mmc_card *);
523 int (*suspend)(struct mmc_card *);
524 int (*resume)(struct mmc_card *);
525 void (*shutdown)(struct mmc_card *);
526};
527
528extern int mmc_register_driver(struct mmc_driver *);
529extern void mmc_unregister_driver(struct mmc_driver *);
530 517
531extern void mmc_fixup_device(struct mmc_card *card, 518extern void mmc_fixup_device(struct mmc_card *card,
532 const struct mmc_fixup *table); 519 const struct mmc_fixup *table);
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index f206e29f94d7..cb2b0400d284 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -154,7 +154,8 @@ extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
154extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, 154extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
155 bool, bool); 155 bool, bool);
156extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); 156extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
157extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); 157extern int mmc_send_tuning(struct mmc_host *host);
158extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
158 159
159#define MMC_ERASE_ARG 0x00000000 160#define MMC_ERASE_ARG 0x00000000
160#define MMC_SECURE_ERASE_ARG 0x80000000 161#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 001366927cf4..42b724e8d503 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -54,6 +54,7 @@ struct mmc_data;
54 * transfer is in progress. 54 * transfer is in progress.
55 * @use_dma: Whether DMA channel is initialized or not. 55 * @use_dma: Whether DMA channel is initialized or not.
56 * @using_dma: Whether DMA is in use for the current transfer. 56 * @using_dma: Whether DMA is in use for the current transfer.
57 * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
57 * @sg_dma: Bus address of DMA buffer. 58 * @sg_dma: Bus address of DMA buffer.
58 * @sg_cpu: Virtual address of DMA buffer. 59 * @sg_cpu: Virtual address of DMA buffer.
59 * @dma_ops: Pointer to platform-specific DMA callbacks. 60 * @dma_ops: Pointer to platform-specific DMA callbacks.
@@ -96,6 +97,7 @@ struct mmc_data;
96 * @quirks: Set of quirks that apply to specific versions of the IP. 97 * @quirks: Set of quirks that apply to specific versions of the IP.
97 * @irq_flags: The flags to be passed to request_irq. 98 * @irq_flags: The flags to be passed to request_irq.
98 * @irq: The irq value to be passed to request_irq. 99 * @irq: The irq value to be passed to request_irq.
100 * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
99 * 101 *
100 * Locking 102 * Locking
101 * ======= 103 * =======
@@ -135,11 +137,11 @@ struct dw_mci {
135 struct mmc_command stop_abort; 137 struct mmc_command stop_abort;
136 unsigned int prev_blksz; 138 unsigned int prev_blksz;
137 unsigned char timing; 139 unsigned char timing;
138 struct workqueue_struct *card_workqueue;
139 140
140 /* DMA interface members*/ 141 /* DMA interface members*/
141 int use_dma; 142 int use_dma;
142 int using_dma; 143 int using_dma;
144 int dma_64bit_address;
143 145
144 dma_addr_t sg_dma; 146 dma_addr_t sg_dma;
145 void *sg_cpu; 147 void *sg_cpu;
@@ -154,7 +156,6 @@ struct dw_mci {
154 u32 stop_cmdr; 156 u32 stop_cmdr;
155 u32 dir_status; 157 u32 dir_status;
156 struct tasklet_struct tasklet; 158 struct tasklet_struct tasklet;
157 struct work_struct card_work;
158 unsigned long pending_events; 159 unsigned long pending_events;
159 unsigned long completed_events; 160 unsigned long completed_events;
160 enum dw_mci_state state; 161 enum dw_mci_state state;
@@ -193,6 +194,8 @@ struct dw_mci {
193 bool vqmmc_enabled; 194 bool vqmmc_enabled;
194 unsigned long irq_flags; /* IRQ flags */ 195 unsigned long irq_flags; /* IRQ flags */
195 int irq; 196 int irq;
197
198 int sdio_id0;
196}; 199};
197 200
198/* DMA ops for Internal/External DMAC interface */ 201/* DMA ops for Internal/External DMAC interface */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index df0c15396bbf..9f322706f7cb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -289,6 +289,7 @@ struct mmc_host {
289#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ 289#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */
290#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ 290#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \
291 MMC_CAP2_HS400_1_2V) 291 MMC_CAP2_HS400_1_2V)
292#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
292#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) 293#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
293 294
294 mmc_pm_flag_t pm_caps; /* supported pm features */ 295 mmc_pm_flag_t pm_caps; /* supported pm features */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 1cd00b3a75b9..49ad7a943638 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -296,6 +296,7 @@ struct _mmc_csd {
296#define EXT_CSD_SANITIZE_START 165 /* W */ 296#define EXT_CSD_SANITIZE_START 165 /* W */
297#define EXT_CSD_WR_REL_PARAM 166 /* RO */ 297#define EXT_CSD_WR_REL_PARAM 166 /* RO */
298#define EXT_CSD_RPMB_MULT 168 /* RO */ 298#define EXT_CSD_RPMB_MULT 168 /* RO */
299#define EXT_CSD_FW_CONFIG 169 /* R/W */
299#define EXT_CSD_BOOT_WP 173 /* R/W */ 300#define EXT_CSD_BOOT_WP 173 /* R/W */
300#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ 301#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
301#define EXT_CSD_PART_CONFIG 179 /* R/W */ 302#define EXT_CSD_PART_CONFIG 179 /* R/W */
@@ -332,6 +333,8 @@ struct _mmc_csd {
332#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ 333#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
333#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ 334#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
334#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ 335#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
336#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
337#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
335#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ 338#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
336#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ 339#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
337#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ 340#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index dba793e3a331..375af80bde7d 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -100,6 +100,12 @@ struct sdhci_host {
100#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) 100#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
101/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ 101/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
102#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) 102#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
103/* Controller does not support 64-bit DMA */
104#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9)
105/* need clear transfer mode register before send cmd */
106#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10)
107/* Capability register bit-63 indicates HS400 support */
108#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11)
103 109
104 int irq; /* Device IRQ */ 110 int irq; /* Device IRQ */
105 void __iomem *ioaddr; /* Mapped address */ 111 void __iomem *ioaddr; /* Mapped address */
@@ -130,6 +136,7 @@ struct sdhci_host {
130#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ 136#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
131#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ 137#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
132#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ 138#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
139#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
133 140
134 unsigned int version; /* SDHCI spec. version */ 141 unsigned int version; /* SDHCI spec. version */
135 142
@@ -155,12 +162,19 @@ struct sdhci_host {
155 162
156 int sg_count; /* Mapped sg entries */ 163 int sg_count; /* Mapped sg entries */
157 164
158 u8 *adma_desc; /* ADMA descriptor table */ 165 void *adma_table; /* ADMA descriptor table */
159 u8 *align_buffer; /* Bounce buffer */ 166 void *align_buffer; /* Bounce buffer */
167
168 size_t adma_table_sz; /* ADMA descriptor table size */
169 size_t align_buffer_sz; /* Bounce buffer size */
160 170
161 dma_addr_t adma_addr; /* Mapped ADMA descr. table */ 171 dma_addr_t adma_addr; /* Mapped ADMA descr. table */
162 dma_addr_t align_addr; /* Mapped bounce buffer */ 172 dma_addr_t align_addr; /* Mapped bounce buffer */
163 173
174 unsigned int desc_sz; /* ADMA descriptor size */
175 unsigned int align_sz; /* ADMA alignment */
176 unsigned int align_mask; /* ADMA alignment mask */
177
164 struct tasklet_struct finish_tasklet; /* Tasklet structures */ 178 struct tasklet_struct finish_tasklet; /* Tasklet structures */
165 179
166 struct timer_list timer; /* Timer for timeouts */ 180 struct timer_list timer; /* Timer for timeouts */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 50f0bc952328..aab032a6ae61 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -84,8 +84,6 @@ struct sdio_driver {
84 struct device_driver drv; 84 struct device_driver drv;
85}; 85};
86 86
87#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
88
89/** 87/**
90 * SDIO_DEVICE - macro used to describe a specific SDIO device 88 * SDIO_DEVICE - macro used to describe a specific SDIO device
91 * @vend: the 16 bit manufacturer code 89 * @vend: the 16 bit manufacturer code
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 88787bb4b3b9..ab8564b03468 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -154,7 +154,7 @@ struct mmu_notifier_ops {
154 * Therefore notifier chains can only be traversed when either 154 * Therefore notifier chains can only be traversed when either
155 * 155 *
156 * 1. mmap_sem is held. 156 * 1. mmap_sem is held.
157 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). 157 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
158 * 3. No other concurrent thread can access the list (release) 158 * 3. No other concurrent thread can access the list (release)
159 */ 159 */
160struct mmu_notifier { 160struct mmu_notifier {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 48bf12ef6620..2f0856d14b21 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -431,6 +431,15 @@ struct zone {
431 */ 431 */
432 int nr_migrate_reserve_block; 432 int nr_migrate_reserve_block;
433 433
434#ifdef CONFIG_MEMORY_ISOLATION
435 /*
436 * Number of isolated pageblock. It is used to solve incorrect
437 * freepage counting problem due to racy retrieving migratetype
438 * of pageblock. Protected by zone->lock.
439 */
440 unsigned long nr_isolate_pageblock;
441#endif
442
434#ifdef CONFIG_MEMORY_HOTPLUG 443#ifdef CONFIG_MEMORY_HOTPLUG
435 /* see spanned/present_pages for more description */ 444 /* see spanned/present_pages for more description */
436 seqlock_t span_seqlock; 445 seqlock_t span_seqlock;
@@ -713,8 +722,8 @@ typedef struct pglist_data {
713 int nr_zones; 722 int nr_zones;
714#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ 723#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
715 struct page *node_mem_map; 724 struct page *node_mem_map;
716#ifdef CONFIG_MEMCG 725#ifdef CONFIG_PAGE_EXTENSION
717 struct page_cgroup *node_page_cgroup; 726 struct page_ext *node_page_ext;
718#endif 727#endif
719#endif 728#endif
720#ifndef CONFIG_NO_BOOTMEM 729#ifndef CONFIG_NO_BOOTMEM
@@ -1069,7 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
1069#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) 1078#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
1070 1079
1071struct page; 1080struct page;
1072struct page_cgroup; 1081struct page_ext;
1073struct mem_section { 1082struct mem_section {
1074 /* 1083 /*
1075 * This is, logically, a pointer to an array of struct 1084 * This is, logically, a pointer to an array of struct
@@ -1087,12 +1096,12 @@ struct mem_section {
1087 1096
1088 /* See declaration of similar field in struct zone */ 1097 /* See declaration of similar field in struct zone */
1089 unsigned long *pageblock_flags; 1098 unsigned long *pageblock_flags;
1090#ifdef CONFIG_MEMCG 1099#ifdef CONFIG_PAGE_EXTENSION
1091 /* 1100 /*
1092 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use 1101 * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use
1093 * section. (see memcontrol.h/page_cgroup.h about this.) 1102 * section. (see page_ext.h about this.)
1094 */ 1103 */
1095 struct page_cgroup *page_cgroup; 1104 struct page_ext *page_ext;
1096 unsigned long pad; 1105 unsigned long pad;
1097#endif 1106#endif
1098 /* 1107 /*
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 44eeef0da186..745def862580 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -69,7 +69,7 @@ struct ieee1394_device_id {
69 * @bDeviceClass: Class of device; numbers are assigned 69 * @bDeviceClass: Class of device; numbers are assigned
70 * by the USB forum. Products may choose to implement classes, 70 * by the USB forum. Products may choose to implement classes,
71 * or be vendor-specific. Device classes specify behavior of all 71 * or be vendor-specific. Device classes specify behavior of all
72 * the interfaces on a devices. 72 * the interfaces on a device.
73 * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. 73 * @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
74 * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. 74 * @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
75 * @bInterfaceClass: Class of interface; numbers are assigned 75 * @bInterfaceClass: Class of interface; numbers are assigned
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 9262e4bf0cc3..c2c561dc0114 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -81,6 +81,9 @@ extern struct vfsmount *mntget(struct vfsmount *mnt);
81extern struct vfsmount *mnt_clone_internal(struct path *path); 81extern struct vfsmount *mnt_clone_internal(struct path *path);
82extern int __mnt_is_readonly(struct vfsmount *mnt); 82extern int __mnt_is_readonly(struct vfsmount *mnt);
83 83
84struct path;
85extern struct vfsmount *clone_private_mount(struct path *path);
86
84struct file_system_type; 87struct file_system_type;
85extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, 88extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
86 int flags, const char *name, 89 int flags, const char *name,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 44f4746d033b..8ac4a68ffae2 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,17 +10,12 @@ struct msi_msg {
10 u32 data; /* 16 bits of msi message data */ 10 u32 data; /* 16 bits of msi message data */
11}; 11};
12 12
13extern int pci_msi_ignore_mask;
13/* Helper functions */ 14/* Helper functions */
14struct irq_data; 15struct irq_data;
15struct msi_desc; 16struct msi_desc;
16void mask_msi_irq(struct irq_data *data);
17void unmask_msi_irq(struct irq_data *data);
18void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 17void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
20void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
21void read_msi_msg(unsigned int irq, struct msi_msg *msg);
22void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 18void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
23void write_msi_msg(unsigned int irq, struct msi_msg *msg);
24 19
25struct msi_desc { 20struct msi_desc {
26 struct { 21 struct {
@@ -48,6 +43,52 @@ struct msi_desc {
48 struct msi_msg msg; 43 struct msi_msg msg;
49}; 44};
50 45
46/* Helpers to hide struct msi_desc implementation details */
47#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
48#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
49#define first_msi_entry(dev) \
50 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
51#define for_each_msi_entry(desc, dev) \
52 list_for_each_entry((desc), dev_to_msi_list((dev)), list)
53
54#ifdef CONFIG_PCI_MSI
55#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
56#define for_each_pci_msi_entry(desc, pdev) \
57 for_each_msi_entry((desc), &(pdev)->dev)
58
59static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
60{
61 return desc->dev;
62}
63#endif /* CONFIG_PCI_MSI */
64
65void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
66void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
67void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
68
69u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
70u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
71void pci_msi_mask_irq(struct irq_data *data);
72void pci_msi_unmask_irq(struct irq_data *data);
73
74/* Conversion helpers. Should be removed after merging */
75static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
76{
77 __pci_write_msi_msg(entry, msg);
78}
79static inline void write_msi_msg(int irq, struct msi_msg *msg)
80{
81 pci_write_msi_msg(irq, msg);
82}
83static inline void mask_msi_irq(struct irq_data *data)
84{
85 pci_msi_mask_irq(data);
86}
87static inline void unmask_msi_irq(struct irq_data *data)
88{
89 pci_msi_unmask_irq(data);
90}
91
51/* 92/*
52 * The arch hooks to setup up msi irqs. Those functions are 93 * The arch hooks to setup up msi irqs. Those functions are
53 * implemented as weak symbols so that they /can/ be overriden by 94 * implemented as weak symbols so that they /can/ be overriden by
@@ -61,18 +102,142 @@ void arch_restore_msi_irqs(struct pci_dev *dev);
61 102
62void default_teardown_msi_irqs(struct pci_dev *dev); 103void default_teardown_msi_irqs(struct pci_dev *dev);
63void default_restore_msi_irqs(struct pci_dev *dev); 104void default_restore_msi_irqs(struct pci_dev *dev);
64u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
65u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
66 105
67struct msi_chip { 106struct msi_controller {
68 struct module *owner; 107 struct module *owner;
69 struct device *dev; 108 struct device *dev;
70 struct device_node *of_node; 109 struct device_node *of_node;
71 struct list_head list; 110 struct list_head list;
111#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
112 struct irq_domain *domain;
113#endif
72 114
73 int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, 115 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
74 struct msi_desc *desc); 116 struct msi_desc *desc);
75 void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); 117 void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
118};
119
120#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
121
122#include <linux/irqhandler.h>
123#include <asm/msi.h>
124
125struct irq_domain;
126struct irq_chip;
127struct device_node;
128struct msi_domain_info;
129
130/**
131 * struct msi_domain_ops - MSI interrupt domain callbacks
132 * @get_hwirq: Retrieve the resulting hw irq number
133 * @msi_init: Domain specific init function for MSI interrupts
134 * @msi_free: Domain specific function to free a MSI interrupts
135 * @msi_check: Callback for verification of the domain/info/dev data
136 * @msi_prepare: Prepare the allocation of the interrupts in the domain
137 * @msi_finish: Optional callbacl to finalize the allocation
138 * @set_desc: Set the msi descriptor for an interrupt
139 * @handle_error: Optional error handler if the allocation fails
140 *
141 * @get_hwirq, @msi_init and @msi_free are callbacks used by
142 * msi_create_irq_domain() and related interfaces
143 *
144 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
145 * are callbacks used by msi_irq_domain_alloc_irqs() and related
146 * interfaces which are based on msi_desc.
147 */
148struct msi_domain_ops {
149 irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
150 msi_alloc_info_t *arg);
151 int (*msi_init)(struct irq_domain *domain,
152 struct msi_domain_info *info,
153 unsigned int virq, irq_hw_number_t hwirq,
154 msi_alloc_info_t *arg);
155 void (*msi_free)(struct irq_domain *domain,
156 struct msi_domain_info *info,
157 unsigned int virq);
158 int (*msi_check)(struct irq_domain *domain,
159 struct msi_domain_info *info,
160 struct device *dev);
161 int (*msi_prepare)(struct irq_domain *domain,
162 struct device *dev, int nvec,
163 msi_alloc_info_t *arg);
164 void (*msi_finish)(msi_alloc_info_t *arg, int retval);
165 void (*set_desc)(msi_alloc_info_t *arg,
166 struct msi_desc *desc);
167 int (*handle_error)(struct irq_domain *domain,
168 struct msi_desc *desc, int error);
169};
170
171/**
172 * struct msi_domain_info - MSI interrupt domain data
173 * @flags: Flags to decribe features and capabilities
174 * @ops: The callback data structure
175 * @chip: Optional: associated interrupt chip
176 * @chip_data: Optional: associated interrupt chip data
177 * @handler: Optional: associated interrupt flow handler
178 * @handler_data: Optional: associated interrupt flow handler data
179 * @handler_name: Optional: associated interrupt flow handler name
180 * @data: Optional: domain specific data
181 */
182struct msi_domain_info {
183 u32 flags;
184 struct msi_domain_ops *ops;
185 struct irq_chip *chip;
186 void *chip_data;
187 irq_flow_handler_t handler;
188 void *handler_data;
189 const char *handler_name;
190 void *data;
191};
192
193/* Flags for msi_domain_info */
194enum {
195 /*
196 * Init non implemented ops callbacks with default MSI domain
197 * callbacks.
198 */
199 MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0),
200 /*
201 * Init non implemented chip callbacks with default MSI chip
202 * callbacks.
203 */
204 MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
205 /* Build identity map between hwirq and irq */
206 MSI_FLAG_IDENTITY_MAP = (1 << 2),
207 /* Support multiple PCI MSI interrupts */
208 MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
209 /* Support PCI MSIX interrupts */
210 MSI_FLAG_PCI_MSIX = (1 << 4),
76}; 211};
77 212
213int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
214 bool force);
215
216struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
217 struct msi_domain_info *info,
218 struct irq_domain *parent);
219int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
220 int nvec);
221void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
222struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
223
224#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
225
226#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
227void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
228struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
229 struct msi_domain_info *info,
230 struct irq_domain *parent);
231int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
232 int nvec, int type);
233void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
234struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
235 struct msi_domain_info *info, struct irq_domain *parent);
236
237irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
238 struct msi_desc *desc);
239int pci_msi_domain_check_cap(struct irq_domain *domain,
240 struct msi_domain_info *info, struct device *dev);
241#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
242
78#endif /* LINUX_MSI_H */ 243#endif /* LINUX_MSI_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 9e6294f32ba8..046a0a2e4c4e 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -187,32 +187,17 @@ struct spi_nor {
187/** 187/**
188 * spi_nor_scan() - scan the SPI NOR 188 * spi_nor_scan() - scan the SPI NOR
189 * @nor: the spi_nor structure 189 * @nor: the spi_nor structure
190 * @id: the spi_device_id provided by the driver 190 * @name: the chip type name
191 * @mode: the read mode supported by the driver 191 * @mode: the read mode supported by the driver
192 * 192 *
193 * The drivers can use this fuction to scan the SPI NOR. 193 * The drivers can use this fuction to scan the SPI NOR.
194 * In the scanning, it will try to get all the necessary information to 194 * In the scanning, it will try to get all the necessary information to
195 * fill the mtd_info{} and the spi_nor{}. 195 * fill the mtd_info{} and the spi_nor{}.
196 * 196 *
197 * The board may assigns a spi_device_id with @id which be used to compared with 197 * The chip type name can be provided through the @name parameter.
198 * the spi_device_id detected by the scanning.
199 * 198 *
200 * Return: 0 for success, others for failure. 199 * Return: 0 for success, others for failure.
201 */ 200 */
202int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, 201int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
203 enum read_mode mode);
204extern const struct spi_device_id spi_nor_ids[];
205
206/**
207 * spi_nor_match_id() - find the spi_device_id by the name
208 * @name: the name of the spi_device_id
209 *
210 * The drivers use this function to find the spi_device_id
211 * specified by the @name.
212 *
213 * Return: returns the right spi_device_id pointer on success,
214 * and returns NULL on failure.
215 */
216const struct spi_device_id *spi_nor_match_id(char *name);
217 202
218#endif 203#endif
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index dcfdecbfa0b7..8e30685affeb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -47,9 +47,9 @@ enum {
47 NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ 47 NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
48 NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ 48 NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
49 NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ 49 NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
50 NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ 50 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
51 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ 51 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
52 NETIF_F_GSO_MPLS_BIT, 52 NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
53 53
54 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ 54 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
55 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ 55 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
@@ -118,7 +118,7 @@ enum {
118#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) 118#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
119#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) 119#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
120#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) 120#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
121#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) 121#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
122#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) 122#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
123#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 123#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -181,7 +181,6 @@ enum {
181 NETIF_F_GSO_IPIP | \ 181 NETIF_F_GSO_IPIP | \
182 NETIF_F_GSO_SIT | \ 182 NETIF_F_GSO_SIT | \
183 NETIF_F_GSO_UDP_TUNNEL | \ 183 NETIF_F_GSO_UDP_TUNNEL | \
184 NETIF_F_GSO_UDP_TUNNEL_CSUM | \ 184 NETIF_F_GSO_UDP_TUNNEL_CSUM)
185 NETIF_F_GSO_MPLS)
186 185
187#endif /* _LINUX_NETDEV_FEATURES_H */ 186#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 74fd5d37f15a..c31f74d76ebd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -57,6 +57,8 @@ struct device;
57struct phy_device; 57struct phy_device;
58/* 802.11 specific */ 58/* 802.11 specific */
59struct wireless_dev; 59struct wireless_dev;
60/* 802.15.4 specific */
61struct wpan_dev;
60 62
61void netdev_set_default_ethtool_ops(struct net_device *dev, 63void netdev_set_default_ethtool_ops(struct net_device *dev,
62 const struct ethtool_ops *ops); 64 const struct ethtool_ops *ops);
@@ -314,6 +316,7 @@ struct napi_struct {
314 struct net_device *dev; 316 struct net_device *dev;
315 struct sk_buff *gro_list; 317 struct sk_buff *gro_list;
316 struct sk_buff *skb; 318 struct sk_buff *skb;
319 struct hrtimer timer;
317 struct list_head dev_list; 320 struct list_head dev_list;
318 struct hlist_node napi_hash_node; 321 struct hlist_node napi_hash_node;
319 unsigned int napi_id; 322 unsigned int napi_id;
@@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t;
386typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); 389typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
387 390
388void __napi_schedule(struct napi_struct *n); 391void __napi_schedule(struct napi_struct *n);
392void __napi_schedule_irqoff(struct napi_struct *n);
389 393
390static inline bool napi_disable_pending(struct napi_struct *n) 394static inline bool napi_disable_pending(struct napi_struct *n)
391{ 395{
@@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n)
420 __napi_schedule(n); 424 __napi_schedule(n);
421} 425}
422 426
427/**
428 * napi_schedule_irqoff - schedule NAPI poll
429 * @n: napi context
430 *
431 * Variant of napi_schedule(), assuming hard irqs are masked.
432 */
433static inline void napi_schedule_irqoff(struct napi_struct *n)
434{
435 if (napi_schedule_prep(n))
436 __napi_schedule_irqoff(n);
437}
438
423/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ 439/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
424static inline bool napi_reschedule(struct napi_struct *napi) 440static inline bool napi_reschedule(struct napi_struct *napi)
425{ 441{
@@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi)
430 return false; 446 return false;
431} 447}
432 448
449void __napi_complete(struct napi_struct *n);
450void napi_complete_done(struct napi_struct *n, int work_done);
433/** 451/**
434 * napi_complete - NAPI processing complete 452 * napi_complete - NAPI processing complete
435 * @n: napi context 453 * @n: napi context
436 * 454 *
437 * Mark NAPI processing as complete. 455 * Mark NAPI processing as complete.
456 * Consider using napi_complete_done() instead.
438 */ 457 */
439void __napi_complete(struct napi_struct *n); 458static inline void napi_complete(struct napi_struct *n)
440void napi_complete(struct napi_struct *n); 459{
460 return napi_complete_done(n, 0);
461}
441 462
442/** 463/**
443 * napi_by_id - lookup a NAPI by napi_id 464 * napi_by_id - lookup a NAPI by napi_id
@@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi);
472 * Stop NAPI from being scheduled on this context. 493 * Stop NAPI from being scheduled on this context.
473 * Waits till any outstanding processing completes. 494 * Waits till any outstanding processing completes.
474 */ 495 */
475static inline void napi_disable(struct napi_struct *n) 496void napi_disable(struct napi_struct *n);
476{
477 might_sleep();
478 set_bit(NAPI_STATE_DISABLE, &n->state);
479 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
480 msleep(1);
481 clear_bit(NAPI_STATE_DISABLE, &n->state);
482}
483 497
484/** 498/**
485 * napi_enable - enable NAPI scheduling 499 * napi_enable - enable NAPI scheduling
@@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo {
740}; 754};
741#endif 755#endif
742 756
743#define MAX_PHYS_PORT_ID_LEN 32 757#define MAX_PHYS_ITEM_ID_LEN 32
744 758
745/* This structure holds a unique identifier to identify the 759/* This structure holds a unique identifier to identify some
746 * physical port used by a netdevice. 760 * physical item (port for example) used by a netdevice.
747 */ 761 */
748struct netdev_phys_port_id { 762struct netdev_phys_item_id {
749 unsigned char id[MAX_PHYS_PORT_ID_LEN]; 763 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
750 unsigned char id_len; 764 unsigned char id_len;
751}; 765};
752 766
@@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
937 * 951 *
938 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], 952 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
939 * struct net_device *dev, 953 * struct net_device *dev,
940 * const unsigned char *addr, u16 flags) 954 * const unsigned char *addr, u16 vid, u16 flags)
941 * Adds an FDB entry to dev for addr. 955 * Adds an FDB entry to dev for addr.
942 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], 956 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
943 * struct net_device *dev, 957 * struct net_device *dev,
944 * const unsigned char *addr) 958 * const unsigned char *addr, u16 vid)
945 * Deletes the FDB entry from dev coresponding to addr. 959 * Deletes the FDB entry from dev coresponding to addr.
946 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 960 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
947 * struct net_device *dev, struct net_device *filter_dev, 961 * struct net_device *dev, struct net_device *filter_dev,
@@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
962 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. 976 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
963 * 977 *
964 * int (*ndo_get_phys_port_id)(struct net_device *dev, 978 * int (*ndo_get_phys_port_id)(struct net_device *dev,
965 * struct netdev_phys_port_id *ppid); 979 * struct netdev_phys_item_id *ppid);
966 * Called to get ID of physical port of this device. If driver does 980 * Called to get ID of physical port of this device. If driver does
967 * not implement this, it is assumed that the hw is not able to have 981 * not implement this, it is assumed that the hw is not able to have
968 * multiple net devices on single physical port. 982 * multiple net devices on single physical port.
@@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1004 * performing GSO on a packet. The device returns true if it is 1018 * performing GSO on a packet. The device returns true if it is
1005 * able to GSO the packet, false otherwise. If the return value is 1019 * able to GSO the packet, false otherwise. If the return value is
1006 * false the stack will do software GSO. 1020 * false the stack will do software GSO.
1021 *
1022 * int (*ndo_switch_parent_id_get)(struct net_device *dev,
1023 * struct netdev_phys_item_id *psid);
1024 * Called to get an ID of the switch chip this port is part of.
1025 * If driver implements this, it indicates that it represents a port
1026 * of a switch chip.
1027 * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
1028 * Called to notify switch device port of bridge port STP
1029 * state change.
1007 */ 1030 */
1008struct net_device_ops { 1031struct net_device_ops {
1009 int (*ndo_init)(struct net_device *dev); 1032 int (*ndo_init)(struct net_device *dev);
@@ -1114,11 +1137,13 @@ struct net_device_ops {
1114 struct nlattr *tb[], 1137 struct nlattr *tb[],
1115 struct net_device *dev, 1138 struct net_device *dev,
1116 const unsigned char *addr, 1139 const unsigned char *addr,
1140 u16 vid,
1117 u16 flags); 1141 u16 flags);
1118 int (*ndo_fdb_del)(struct ndmsg *ndm, 1142 int (*ndo_fdb_del)(struct ndmsg *ndm,
1119 struct nlattr *tb[], 1143 struct nlattr *tb[],
1120 struct net_device *dev, 1144 struct net_device *dev,
1121 const unsigned char *addr); 1145 const unsigned char *addr,
1146 u16 vid);
1122 int (*ndo_fdb_dump)(struct sk_buff *skb, 1147 int (*ndo_fdb_dump)(struct sk_buff *skb,
1123 struct netlink_callback *cb, 1148 struct netlink_callback *cb,
1124 struct net_device *dev, 1149 struct net_device *dev,
@@ -1136,7 +1161,7 @@ struct net_device_ops {
1136 int (*ndo_change_carrier)(struct net_device *dev, 1161 int (*ndo_change_carrier)(struct net_device *dev,
1137 bool new_carrier); 1162 bool new_carrier);
1138 int (*ndo_get_phys_port_id)(struct net_device *dev, 1163 int (*ndo_get_phys_port_id)(struct net_device *dev,
1139 struct netdev_phys_port_id *ppid); 1164 struct netdev_phys_item_id *ppid);
1140 void (*ndo_add_vxlan_port)(struct net_device *dev, 1165 void (*ndo_add_vxlan_port)(struct net_device *dev,
1141 sa_family_t sa_family, 1166 sa_family_t sa_family,
1142 __be16 port); 1167 __be16 port);
@@ -1155,6 +1180,12 @@ struct net_device_ops {
1155 int (*ndo_get_lock_subclass)(struct net_device *dev); 1180 int (*ndo_get_lock_subclass)(struct net_device *dev);
1156 bool (*ndo_gso_check) (struct sk_buff *skb, 1181 bool (*ndo_gso_check) (struct sk_buff *skb,
1157 struct net_device *dev); 1182 struct net_device *dev);
1183#ifdef CONFIG_NET_SWITCHDEV
1184 int (*ndo_switch_parent_id_get)(struct net_device *dev,
1185 struct netdev_phys_item_id *psid);
1186 int (*ndo_switch_port_stp_update)(struct net_device *dev,
1187 u8 state);
1188#endif
1158}; 1189};
1159 1190
1160/** 1191/**
@@ -1216,6 +1247,8 @@ enum netdev_priv_flags {
1216 IFF_LIVE_ADDR_CHANGE = 1<<20, 1247 IFF_LIVE_ADDR_CHANGE = 1<<20,
1217 IFF_MACVLAN = 1<<21, 1248 IFF_MACVLAN = 1<<21,
1218 IFF_XMIT_DST_RELEASE_PERM = 1<<22, 1249 IFF_XMIT_DST_RELEASE_PERM = 1<<22,
1250 IFF_IPVLAN_MASTER = 1<<23,
1251 IFF_IPVLAN_SLAVE = 1<<24,
1219}; 1252};
1220 1253
1221#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1254#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1241,6 +1274,8 @@ enum netdev_priv_flags {
1241#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE 1274#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1242#define IFF_MACVLAN IFF_MACVLAN 1275#define IFF_MACVLAN IFF_MACVLAN
1243#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM 1276#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1277#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1278#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1244 1279
1245/** 1280/**
1246 * struct net_device - The DEVICE structure. 1281 * struct net_device - The DEVICE structure.
@@ -1572,6 +1607,7 @@ struct net_device {
1572 struct inet6_dev __rcu *ip6_ptr; 1607 struct inet6_dev __rcu *ip6_ptr;
1573 void *ax25_ptr; 1608 void *ax25_ptr;
1574 struct wireless_dev *ieee80211_ptr; 1609 struct wireless_dev *ieee80211_ptr;
1610 struct wpan_dev *ieee802154_ptr;
1575 1611
1576/* 1612/*
1577 * Cache lines mostly used on receive path (including eth_type_trans()) 1613 * Cache lines mostly used on receive path (including eth_type_trans())
@@ -1590,6 +1626,7 @@ struct net_device {
1590 1626
1591#endif 1627#endif
1592 1628
1629 unsigned long gro_flush_timeout;
1593 rx_handler_func_t __rcu *rx_handler; 1630 rx_handler_func_t __rcu *rx_handler;
1594 void __rcu *rx_handler_data; 1631 void __rcu *rx_handler_data;
1595 1632
@@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len;
2316 * Incoming packets are placed on per-cpu queues 2353 * Incoming packets are placed on per-cpu queues
2317 */ 2354 */
2318struct softnet_data { 2355struct softnet_data {
2319 struct Qdisc *output_queue;
2320 struct Qdisc **output_queue_tailp;
2321 struct list_head poll_list; 2356 struct list_head poll_list;
2322 struct sk_buff *completion_queue;
2323 struct sk_buff_head process_queue; 2357 struct sk_buff_head process_queue;
2324 2358
2325 /* stats */ 2359 /* stats */
@@ -2327,10 +2361,17 @@ struct softnet_data {
2327 unsigned int time_squeeze; 2361 unsigned int time_squeeze;
2328 unsigned int cpu_collision; 2362 unsigned int cpu_collision;
2329 unsigned int received_rps; 2363 unsigned int received_rps;
2330
2331#ifdef CONFIG_RPS 2364#ifdef CONFIG_RPS
2332 struct softnet_data *rps_ipi_list; 2365 struct softnet_data *rps_ipi_list;
2366#endif
2367#ifdef CONFIG_NET_FLOW_LIMIT
2368 struct sd_flow_limit __rcu *flow_limit;
2369#endif
2370 struct Qdisc *output_queue;
2371 struct Qdisc **output_queue_tailp;
2372 struct sk_buff *completion_queue;
2333 2373
2374#ifdef CONFIG_RPS
2334 /* Elements below can be accessed between CPUs for RPS */ 2375 /* Elements below can be accessed between CPUs for RPS */
2335 struct call_single_data csd ____cacheline_aligned_in_smp; 2376 struct call_single_data csd ____cacheline_aligned_in_smp;
2336 struct softnet_data *rps_ipi_next; 2377 struct softnet_data *rps_ipi_next;
@@ -2342,9 +2383,6 @@ struct softnet_data {
2342 struct sk_buff_head input_pkt_queue; 2383 struct sk_buff_head input_pkt_queue;
2343 struct napi_struct backlog; 2384 struct napi_struct backlog;
2344 2385
2345#ifdef CONFIG_NET_FLOW_LIMIT
2346 struct sd_flow_limit __rcu *flow_limit;
2347#endif
2348}; 2386};
2349 2387
2350static inline void input_queue_head_incr(struct softnet_data *sd) 2388static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2748} 2786}
2749#endif 2787#endif
2750 2788
2751static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2752 const struct net_device *from_dev)
2753{
2754 int err;
2755
2756 err = netif_set_real_num_tx_queues(to_dev,
2757 from_dev->real_num_tx_queues);
2758 if (err)
2759 return err;
2760#ifdef CONFIG_SYSFS
2761 return netif_set_real_num_rx_queues(to_dev,
2762 from_dev->real_num_rx_queues);
2763#else
2764 return 0;
2765#endif
2766}
2767
2768#ifdef CONFIG_SYSFS 2789#ifdef CONFIG_SYSFS
2769static inline unsigned int get_netdev_rx_queue_index( 2790static inline unsigned int get_netdev_rx_queue_index(
2770 struct netdev_rx_queue *queue) 2791 struct netdev_rx_queue *queue)
@@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int);
2864int dev_set_mac_address(struct net_device *, struct sockaddr *); 2885int dev_set_mac_address(struct net_device *, struct sockaddr *);
2865int dev_change_carrier(struct net_device *, bool new_carrier); 2886int dev_change_carrier(struct net_device *, bool new_carrier);
2866int dev_get_phys_port_id(struct net_device *dev, 2887int dev_get_phys_port_id(struct net_device *dev,
2867 struct netdev_phys_port_id *ppid); 2888 struct netdev_phys_item_id *ppid);
2868struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); 2889struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
2869struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2890struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2870 struct netdev_queue *txq, int *ret); 2891 struct netdev_queue *txq, int *ret);
@@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev,
3425void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 3446void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3426void *netdev_lower_dev_get_private(struct net_device *dev, 3447void *netdev_lower_dev_get_private(struct net_device *dev,
3427 struct net_device *lower_dev); 3448 struct net_device *lower_dev);
3449
3450/* RSS keys are 40 or 52 bytes long */
3451#define NETDEV_RSS_KEY_LEN 52
3452extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
3453void netdev_rss_key_fill(void *buffer, size_t len);
3454
3428int dev_get_nest_level(struct net_device *dev, 3455int dev_get_nest_level(struct net_device *dev,
3429 bool (*type_check)(struct net_device *dev)); 3456 bool (*type_check)(struct net_device *dev));
3430int skb_checksum_help(struct sk_buff *skb); 3457int skb_checksum_help(struct sk_buff *skb);
@@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3569 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); 3596 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
3570 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 3597 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
3571 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 3598 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
3572 BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); 3599 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
3573 3600
3574 return (features & feature) == feature; 3601 return (features & feature) == feature;
3575} 3602}
@@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev)
3614 return dev->priv_flags & IFF_MACVLAN; 3641 return dev->priv_flags & IFF_MACVLAN;
3615} 3642}
3616 3643
3644static inline bool netif_is_macvlan_port(struct net_device *dev)
3645{
3646 return dev->priv_flags & IFF_MACVLAN_PORT;
3647}
3648
3649static inline bool netif_is_ipvlan(struct net_device *dev)
3650{
3651 return dev->priv_flags & IFF_IPVLAN_SLAVE;
3652}
3653
3654static inline bool netif_is_ipvlan_port(struct net_device *dev)
3655{
3656 return dev->priv_flags & IFF_IPVLAN_MASTER;
3657}
3658
3617static inline bool netif_is_bond_master(struct net_device *dev) 3659static inline bool netif_is_bond_master(struct net_device *dev)
3618{ 3660{
3619 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 3661 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 356acc2846fd..022b761dbf0a 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -490,6 +490,8 @@ enum {
490 490
491 /* nfs42 */ 491 /* nfs42 */
492 NFSPROC4_CLNT_SEEK, 492 NFSPROC4_CLNT_SEEK,
493 NFSPROC4_CLNT_ALLOCATE,
494 NFSPROC4_CLNT_DEALLOCATE,
493}; 495};
494 496
495/* nfs41 types */ 497/* nfs41 types */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index c72d1ad41ad4..6d627b92df53 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -163,7 +163,7 @@ struct nfs_inode {
163 */ 163 */
164 __be32 cookieverf[2]; 164 __be32 cookieverf[2];
165 165
166 unsigned long npages; 166 unsigned long nrequests;
167 struct nfs_mds_commit_info commit_info; 167 struct nfs_mds_commit_info commit_info;
168 168
169 /* Open contexts for shared mmap writes */ 169 /* Open contexts for shared mmap writes */
@@ -520,7 +520,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data);
520static inline int 520static inline int
521nfs_have_writebacks(struct inode *inode) 521nfs_have_writebacks(struct inode *inode)
522{ 522{
523 return NFS_I(inode)->npages != 0; 523 return NFS_I(inode)->nrequests != 0;
524} 524}
525 525
526/* 526/*
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a32ba0d7a98f..1e37fbb78f7a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -231,5 +231,7 @@ struct nfs_server {
231#define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) 231#define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17)
232#define NFS_CAP_SECURITY_LABEL (1U << 18) 232#define NFS_CAP_SECURITY_LABEL (1U << 18)
233#define NFS_CAP_SEEK (1U << 19) 233#define NFS_CAP_SEEK (1U << 19)
234#define NFS_CAP_ALLOCATE (1U << 20)
235#define NFS_CAP_DEALLOCATE (1U << 21)
234 236
235#endif 237#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 983876f24aed..467c84efb596 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1224,14 +1224,39 @@ struct nfs41_free_stateid_res {
1224 unsigned int status; 1224 unsigned int status;
1225}; 1225};
1226 1226
1227static inline void
1228nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
1229{
1230 kfree(cinfo->buckets);
1231}
1232
1227#else 1233#else
1228 1234
1229struct pnfs_ds_commit_info { 1235struct pnfs_ds_commit_info {
1230}; 1236};
1231 1237
1238static inline void
1239nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
1240{
1241}
1242
1232#endif /* CONFIG_NFS_V4_1 */ 1243#endif /* CONFIG_NFS_V4_1 */
1233 1244
1234#ifdef CONFIG_NFS_V4_2 1245#ifdef CONFIG_NFS_V4_2
1246struct nfs42_falloc_args {
1247 struct nfs4_sequence_args seq_args;
1248
1249 struct nfs_fh *falloc_fh;
1250 nfs4_stateid falloc_stateid;
1251 u64 falloc_offset;
1252 u64 falloc_length;
1253};
1254
1255struct nfs42_falloc_res {
1256 struct nfs4_sequence_res seq_res;
1257 unsigned int status;
1258};
1259
1235struct nfs42_seek_args { 1260struct nfs42_seek_args {
1236 struct nfs4_sequence_args seq_args; 1261 struct nfs4_sequence_args seq_args;
1237 1262
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 20163b9a0eae..167342c2ce6b 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */ 15 */
20 16
21#ifndef NL802154_H 17#ifndef NL802154_H
diff --git a/include/linux/of.h b/include/linux/of.h
index 6545e7aec7bb..dfde07e77a63 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -23,6 +23,8 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/topology.h> 24#include <linux/topology.h>
25#include <linux/notifier.h> 25#include <linux/notifier.h>
26#include <linux/property.h>
27#include <linux/list.h>
26 28
27#include <asm/byteorder.h> 29#include <asm/byteorder.h>
28#include <asm/errno.h> 30#include <asm/errno.h>
@@ -49,14 +51,13 @@ struct device_node {
49 const char *type; 51 const char *type;
50 phandle phandle; 52 phandle phandle;
51 const char *full_name; 53 const char *full_name;
54 struct fwnode_handle fwnode;
52 55
53 struct property *properties; 56 struct property *properties;
54 struct property *deadprops; /* removed properties */ 57 struct property *deadprops; /* removed properties */
55 struct device_node *parent; 58 struct device_node *parent;
56 struct device_node *child; 59 struct device_node *child;
57 struct device_node *sibling; 60 struct device_node *sibling;
58 struct device_node *next; /* next device of same type */
59 struct device_node *allnext; /* next in list of all nodes */
60 struct kobject kobj; 61 struct kobject kobj;
61 unsigned long _flags; 62 unsigned long _flags;
62 void *data; 63 void *data;
@@ -74,11 +75,18 @@ struct of_phandle_args {
74 uint32_t args[MAX_PHANDLE_ARGS]; 75 uint32_t args[MAX_PHANDLE_ARGS];
75}; 76};
76 77
78struct of_reconfig_data {
79 struct device_node *dn;
80 struct property *prop;
81 struct property *old_prop;
82};
83
77/* initialize a node */ 84/* initialize a node */
78extern struct kobj_type of_node_ktype; 85extern struct kobj_type of_node_ktype;
79static inline void of_node_init(struct device_node *node) 86static inline void of_node_init(struct device_node *node)
80{ 87{
81 kobject_init(&node->kobj, &of_node_ktype); 88 kobject_init(&node->kobj, &of_node_ktype);
89 node->fwnode.type = FWNODE_OF;
82} 90}
83 91
84/* true when node is initialized */ 92/* true when node is initialized */
@@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node)
105static inline void of_node_put(struct device_node *node) { } 113static inline void of_node_put(struct device_node *node) { }
106#endif /* !CONFIG_OF_DYNAMIC */ 114#endif /* !CONFIG_OF_DYNAMIC */
107 115
108#ifdef CONFIG_OF
109
110/* Pointer for first entry in chain of all nodes. */ 116/* Pointer for first entry in chain of all nodes. */
111extern struct device_node *of_allnodes; 117extern struct device_node *of_root;
112extern struct device_node *of_chosen; 118extern struct device_node *of_chosen;
113extern struct device_node *of_aliases; 119extern struct device_node *of_aliases;
114extern struct device_node *of_stdout; 120extern struct device_node *of_stdout;
115extern raw_spinlock_t devtree_lock; 121extern raw_spinlock_t devtree_lock;
116 122
123#ifdef CONFIG_OF
124static inline bool is_of_node(struct fwnode_handle *fwnode)
125{
126 return fwnode && fwnode->type == FWNODE_OF;
127}
128
129static inline struct device_node *of_node(struct fwnode_handle *fwnode)
130{
131 return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
132}
133
117static inline bool of_have_populated_dt(void) 134static inline bool of_have_populated_dt(void)
118{ 135{
119 return of_allnodes != NULL; 136 return of_root != NULL;
120} 137}
121 138
122static inline bool of_node_is_root(const struct device_node *node) 139static inline bool of_node_is_root(const struct device_node *node)
@@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
160 clear_bit(flag, &p->_flags); 177 clear_bit(flag, &p->_flags);
161} 178}
162 179
180extern struct device_node *__of_find_all_nodes(struct device_node *prev);
163extern struct device_node *of_find_all_nodes(struct device_node *prev); 181extern struct device_node *of_find_all_nodes(struct device_node *prev);
164 182
165/* 183/*
@@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np)
215 return np ? np->full_name : "<no-node>"; 233 return np ? np->full_name : "<no-node>";
216} 234}
217 235
218#define for_each_of_allnodes(dn) \ 236#define for_each_of_allnodes_from(from, dn) \
219 for (dn = of_allnodes; dn; dn = dn->allnext) 237 for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn))
238#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn)
220extern struct device_node *of_find_node_by_name(struct device_node *from, 239extern struct device_node *of_find_node_by_name(struct device_node *from,
221 const char *name); 240 const char *name);
222extern struct device_node *of_find_node_by_type(struct device_node *from, 241extern struct device_node *of_find_node_by_type(struct device_node *from,
@@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match(
228 const struct of_device_id *matches, 247 const struct of_device_id *matches,
229 const struct of_device_id **match); 248 const struct of_device_id **match);
230 249
231extern struct device_node *of_find_node_by_path(const char *path); 250extern struct device_node *of_find_node_opts_by_path(const char *path,
251 const char **opts);
252static inline struct device_node *of_find_node_by_path(const char *path)
253{
254 return of_find_node_opts_by_path(path, NULL);
255}
256
232extern struct device_node *of_find_node_by_phandle(phandle handle); 257extern struct device_node *of_find_node_by_phandle(phandle handle);
233extern struct device_node *of_get_parent(const struct device_node *node); 258extern struct device_node *of_get_parent(const struct device_node *node);
234extern struct device_node *of_get_next_parent(struct device_node *node); 259extern struct device_node *of_get_next_parent(struct device_node *node);
@@ -263,21 +288,23 @@ extern int of_property_read_u32_array(const struct device_node *np,
263 size_t sz); 288 size_t sz);
264extern int of_property_read_u64(const struct device_node *np, 289extern int of_property_read_u64(const struct device_node *np,
265 const char *propname, u64 *out_value); 290 const char *propname, u64 *out_value);
291extern int of_property_read_u64_array(const struct device_node *np,
292 const char *propname,
293 u64 *out_values,
294 size_t sz);
266 295
267extern int of_property_read_string(struct device_node *np, 296extern int of_property_read_string(struct device_node *np,
268 const char *propname, 297 const char *propname,
269 const char **out_string); 298 const char **out_string);
270extern int of_property_read_string_index(struct device_node *np,
271 const char *propname,
272 int index, const char **output);
273extern int of_property_match_string(struct device_node *np, 299extern int of_property_match_string(struct device_node *np,
274 const char *propname, 300 const char *propname,
275 const char *string); 301 const char *string);
276extern int of_property_count_strings(struct device_node *np, 302extern int of_property_read_string_helper(struct device_node *np,
277 const char *propname); 303 const char *propname,
304 const char **out_strs, size_t sz, int index);
278extern int of_device_is_compatible(const struct device_node *device, 305extern int of_device_is_compatible(const struct device_node *device,
279 const char *); 306 const char *);
280extern int of_device_is_available(const struct device_node *device); 307extern bool of_device_is_available(const struct device_node *device);
281extern const void *of_get_property(const struct device_node *node, 308extern const void *of_get_property(const struct device_node *node,
282 const char *name, 309 const char *name,
283 int *lenp); 310 int *lenp);
@@ -319,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop);
319#define OF_RECONFIG_REMOVE_PROPERTY 0x0004 346#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
320#define OF_RECONFIG_UPDATE_PROPERTY 0x0005 347#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
321 348
322struct of_prop_reconfig {
323 struct device_node *dn;
324 struct property *prop;
325 struct property *old_prop;
326};
327
328extern int of_reconfig_notifier_register(struct notifier_block *);
329extern int of_reconfig_notifier_unregister(struct notifier_block *);
330extern int of_reconfig_notify(unsigned long, void *);
331
332extern int of_attach_node(struct device_node *); 349extern int of_attach_node(struct device_node *);
333extern int of_detach_node(struct device_node *); 350extern int of_detach_node(struct device_node *);
334 351
@@ -357,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index);
357 374
358#else /* CONFIG_OF */ 375#else /* CONFIG_OF */
359 376
377static inline bool is_of_node(struct fwnode_handle *fwnode)
378{
379 return false;
380}
381
382static inline struct device_node *of_node(struct fwnode_handle *fwnode)
383{
384 return NULL;
385}
386
360static inline const char* of_node_full_name(const struct device_node *np) 387static inline const char* of_node_full_name(const struct device_node *np)
361{ 388{
362 return "<no-node>"; 389 return "<no-node>";
@@ -387,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path)
387 return NULL; 414 return NULL;
388} 415}
389 416
417static inline struct device_node *of_find_node_opts_by_path(const char *path,
418 const char **opts)
419{
420 return NULL;
421}
422
390static inline struct device_node *of_get_parent(const struct device_node *node) 423static inline struct device_node *of_get_parent(const struct device_node *node)
391{ 424{
392 return NULL; 425 return NULL;
@@ -428,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device,
428 return 0; 461 return 0;
429} 462}
430 463
431static inline int of_device_is_available(const struct device_node *device) 464static inline bool of_device_is_available(const struct device_node *device)
432{ 465{
433 return 0; 466 return false;
434} 467}
435 468
436static inline struct property *of_find_property(const struct device_node *np, 469static inline struct property *of_find_property(const struct device_node *np,
@@ -479,22 +512,23 @@ static inline int of_property_read_u32_array(const struct device_node *np,
479 return -ENOSYS; 512 return -ENOSYS;
480} 513}
481 514
482static inline int of_property_read_string(struct device_node *np, 515static inline int of_property_read_u64_array(const struct device_node *np,
483 const char *propname, 516 const char *propname,
484 const char **out_string) 517 u64 *out_values, size_t sz)
485{ 518{
486 return -ENOSYS; 519 return -ENOSYS;
487} 520}
488 521
489static inline int of_property_read_string_index(struct device_node *np, 522static inline int of_property_read_string(struct device_node *np,
490 const char *propname, int index, 523 const char *propname,
491 const char **out_string) 524 const char **out_string)
492{ 525{
493 return -ENOSYS; 526 return -ENOSYS;
494} 527}
495 528
496static inline int of_property_count_strings(struct device_node *np, 529static inline int of_property_read_string_helper(struct device_node *np,
497 const char *propname) 530 const char *propname,
531 const char **out_strs, size_t sz, int index)
498{ 532{
499 return -ENOSYS; 533 return -ENOSYS;
500} 534}
@@ -668,6 +702,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
668} 702}
669 703
670/** 704/**
705 * of_property_read_string_array() - Read an array of strings from a multiple
706 * strings property.
707 * @np: device node from which the property value is to be read.
708 * @propname: name of the property to be searched.
709 * @out_strs: output array of string pointers.
710 * @sz: number of array elements to read.
711 *
712 * Search for a property in a device tree node and retrieve a list of
713 * terminated string values (pointer to data, not a copy) in that property.
714 *
715 * If @out_strs is NULL, the number of strings in the property is returned.
716 */
717static inline int of_property_read_string_array(struct device_node *np,
718 const char *propname, const char **out_strs,
719 size_t sz)
720{
721 return of_property_read_string_helper(np, propname, out_strs, sz, 0);
722}
723
724/**
725 * of_property_count_strings() - Find and return the number of strings from a
726 * multiple strings property.
727 * @np: device node from which the property value is to be read.
728 * @propname: name of the property to be searched.
729 *
730 * Search for a property in a device tree node and retrieve the number of null
731 * terminated string contain in it. Returns the number of strings on
732 * success, -EINVAL if the property does not exist, -ENODATA if property
733 * does not have a value, and -EILSEQ if the string is not null-terminated
734 * within the length of the property data.
735 */
736static inline int of_property_count_strings(struct device_node *np,
737 const char *propname)
738{
739 return of_property_read_string_helper(np, propname, NULL, 0, 0);
740}
741
742/**
743 * of_property_read_string_index() - Find and read a string from a multiple
744 * strings property.
745 * @np: device node from which the property value is to be read.
746 * @propname: name of the property to be searched.
747 * @index: index of the string in the list of strings
748 * @out_string: pointer to null terminated return string, modified only if
749 * return value is 0.
750 *
751 * Search for a property in a device tree node and retrieve a null
752 * terminated string value (pointer to data, not a copy) in the list of strings
753 * contained in that property.
754 * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
755 * property does not have a value, and -EILSEQ if the string is not
756 * null-terminated within the length of the property data.
757 *
758 * The out_string pointer is modified only if a valid string can be decoded.
759 */
760static inline int of_property_read_string_index(struct device_node *np,
761 const char *propname,
762 int index, const char **output)
763{
764 int rc = of_property_read_string_helper(np, propname, output, 1, index);
765 return rc < 0 ? rc : 0;
766}
767
768/**
671 * of_property_read_bool - Findfrom a property 769 * of_property_read_bool - Findfrom a property
672 * @np: device node from which the property value is to be read. 770 * @np: device node from which the property value is to be read.
673 * @propname: name of the property to be searched. 771 * @propname: name of the property to be searched.
@@ -704,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np,
704 return of_property_read_u32_array(np, propname, out_value, 1); 802 return of_property_read_u32_array(np, propname, out_value, 1);
705} 803}
706 804
805static inline int of_property_read_s32(const struct device_node *np,
806 const char *propname,
807 s32 *out_value)
808{
809 return of_property_read_u32(np, propname, (u32*) out_value);
810}
811
707#define of_property_for_each_u32(np, propname, prop, p, u) \ 812#define of_property_for_each_u32(np, propname, prop, p, u) \
708 for (prop = of_find_property(np, propname, NULL), \ 813 for (prop = of_find_property(np, propname, NULL), \
709 p = of_prop_next_u32(prop, NULL, &u); \ 814 p = of_prop_next_u32(prop, NULL, &u); \
@@ -772,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
772 = { .compatible = compat, \ 877 = { .compatible = compat, \
773 .data = (fn == (fn_type)NULL) ? fn : fn } 878 .data = (fn == (fn_type)NULL) ? fn : fn }
774#else 879#else
775#define _OF_DECLARE(table, name, compat, fn, fn_type) \ 880#define _OF_DECLARE(table, name, compat, fn, fn_type) \
776 static const struct of_device_id __of_table_##name \ 881 static const struct of_device_id __of_table_##name \
777 __attribute__((unused)) \ 882 __attribute__((unused)) \
778 = { .compatible = compat, \ 883 = { .compatible = compat, \
@@ -823,7 +928,19 @@ struct of_changeset {
823 struct list_head entries; 928 struct list_head entries;
824}; 929};
825 930
931enum of_reconfig_change {
932 OF_RECONFIG_NO_CHANGE = 0,
933 OF_RECONFIG_CHANGE_ADD,
934 OF_RECONFIG_CHANGE_REMOVE,
935};
936
826#ifdef CONFIG_OF_DYNAMIC 937#ifdef CONFIG_OF_DYNAMIC
938extern int of_reconfig_notifier_register(struct notifier_block *);
939extern int of_reconfig_notifier_unregister(struct notifier_block *);
940extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
941extern int of_reconfig_get_state_change(unsigned long action,
942 struct of_reconfig_data *arg);
943
827extern void of_changeset_init(struct of_changeset *ocs); 944extern void of_changeset_init(struct of_changeset *ocs);
828extern void of_changeset_destroy(struct of_changeset *ocs); 945extern void of_changeset_destroy(struct of_changeset *ocs);
829extern int of_changeset_apply(struct of_changeset *ocs); 946extern int of_changeset_apply(struct of_changeset *ocs);
@@ -861,9 +978,69 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
861{ 978{
862 return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); 979 return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
863} 980}
864#endif 981#else /* CONFIG_OF_DYNAMIC */
982static inline int of_reconfig_notifier_register(struct notifier_block *nb)
983{
984 return -EINVAL;
985}
986static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
987{
988 return -EINVAL;
989}
990static inline int of_reconfig_notify(unsigned long action,
991 struct of_reconfig_data *arg)
992{
993 return -EINVAL;
994}
995static inline int of_reconfig_get_state_change(unsigned long action,
996 struct of_reconfig_data *arg)
997{
998 return -EINVAL;
999}
1000#endif /* CONFIG_OF_DYNAMIC */
865 1001
866/* CONFIG_OF_RESOLVE api */ 1002/* CONFIG_OF_RESOLVE api */
867extern int of_resolve_phandles(struct device_node *tree); 1003extern int of_resolve_phandles(struct device_node *tree);
868 1004
1005/**
1006 * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
1007 * @np: Pointer to the given device_node
1008 *
1009 * return true if present false otherwise
1010 */
1011static inline bool of_device_is_system_power_controller(const struct device_node *np)
1012{
1013 return of_property_read_bool(np, "system-power-controller");
1014}
1015
1016/**
1017 * Overlay support
1018 */
1019
1020#ifdef CONFIG_OF_OVERLAY
1021
1022/* ID based overlays; the API for external users */
1023int of_overlay_create(struct device_node *tree);
1024int of_overlay_destroy(int id);
1025int of_overlay_destroy_all(void);
1026
1027#else
1028
1029static inline int of_overlay_create(struct device_node *tree)
1030{
1031 return -ENOTSUPP;
1032}
1033
1034static inline int of_overlay_destroy(int id)
1035{
1036 return -ENOTSUPP;
1037}
1038
1039static inline int of_overlay_destroy_all(void)
1040{
1041 return -ENOTSUPP;
1042}
1043
1044#endif
1045
869#endif /* _LINUX_OF_H */ 1046#endif /* _LINUX_OF_H */
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 8cb14eb393d6..d88e81be6368 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index,
106 struct resource *r); 106 struct resource *r);
107void __iomem *of_iomap(struct device_node *node, int index); 107void __iomem *of_iomap(struct device_node *node, int index);
108void __iomem *of_io_request_and_map(struct device_node *device, 108void __iomem *of_io_request_and_map(struct device_node *device,
109 int index, char *name); 109 int index, const char *name);
110#else 110#else
111 111
112#include <linux/io.h> 112#include <linux/io.h>
@@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
123} 123}
124 124
125static inline void __iomem *of_io_request_and_map(struct device_node *device, 125static inline void __iomem *of_io_request_and_map(struct device_node *device,
126 int index, char *name) 126 int index, const char *name)
127{ 127{
128 return IOMEM_ERR_PTR(-EINVAL); 128 return IOMEM_ERR_PTR(-EINVAL);
129} 129}
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 1fd207e7a847..ce0e5abeb454 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -59,13 +59,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
59#endif 59#endif
60 60
61#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) 61#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
62int of_pci_msi_chip_add(struct msi_chip *chip); 62int of_pci_msi_chip_add(struct msi_controller *chip);
63void of_pci_msi_chip_remove(struct msi_chip *chip); 63void of_pci_msi_chip_remove(struct msi_controller *chip);
64struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); 64struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
65#else 65#else
66static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } 66static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
67static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } 67static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
68static inline struct msi_chip * 68static inline struct msi_controller *
69of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } 69of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
70#endif 70#endif
71 71
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index c65a18a0cfdf..7e09244bb679 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size);
39/* for building the device tree */ 39/* for building the device tree */
40extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); 40extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
41 41
42extern void (*of_pdt_build_more)(struct device_node *dp, 42extern void (*of_pdt_build_more)(struct device_node *dp);
43 struct device_node ***nextp);
44 43
45#endif /* _LINUX_OF_PDT_H */ 44#endif /* _LINUX_OF_PDT_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index c2b0627a2317..8a860f096c35 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root,
84static inline void of_platform_depopulate(struct device *parent) { } 84static inline void of_platform_depopulate(struct device *parent) { }
85#endif 85#endif
86 86
87#ifdef CONFIG_OF_DYNAMIC
88extern void of_platform_register_reconfig_notifier(void);
89#else
90static inline void of_platform_register_reconfig_notifier(void) { }
91#endif
92
87#endif /* _LINUX_OF_PLATFORM_H */ 93#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 5b5efae09135..ad2f67054372 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -16,7 +16,7 @@ struct reserved_mem {
16}; 16};
17 17
18struct reserved_mem_ops { 18struct reserved_mem_ops {
19 void (*device_init)(struct reserved_mem *rmem, 19 int (*device_init)(struct reserved_mem *rmem,
20 struct device *dev); 20 struct device *dev);
21 void (*device_release)(struct reserved_mem *rmem, 21 void (*device_release)(struct reserved_mem *rmem,
22 struct device *dev); 22 struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
28 _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) 28 _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
29 29
30#ifdef CONFIG_OF_RESERVED_MEM 30#ifdef CONFIG_OF_RESERVED_MEM
31void of_reserved_mem_device_init(struct device *dev); 31int of_reserved_mem_device_init(struct device *dev);
32void of_reserved_mem_device_release(struct device *dev); 32void of_reserved_mem_device_release(struct device *dev);
33 33
34void fdt_init_reserved_mem(void); 34void fdt_init_reserved_mem(void);
35void fdt_reserved_mem_save_node(unsigned long node, const char *uname, 35void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
36 phys_addr_t base, phys_addr_t size); 36 phys_addr_t base, phys_addr_t size);
37#else 37#else
38static inline void of_reserved_mem_device_init(struct device *dev) { } 38static inline int of_reserved_mem_device_init(struct device *dev)
39{
40 return -ENOSYS;
41}
39static inline void of_reserved_mem_device_release(struct device *pdev) { } 42static inline void of_reserved_mem_device_release(struct device *pdev) { }
40 43
41static inline void fdt_init_reserved_mem(void) { } 44static inline void fdt_init_reserved_mem(void) { }
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
new file mode 100644
index 000000000000..c2080eebbb47
--- /dev/null
+++ b/include/linux/omap-gpmc.h
@@ -0,0 +1,199 @@
1/*
2 * OMAP GPMC (General Purpose Memory Controller) defines
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10/* Maximum Number of Chip Selects */
11#define GPMC_CS_NUM 8
12
13#define GPMC_CONFIG_WP 0x00000005
14
15#define GPMC_IRQ_FIFOEVENTENABLE 0x01
16#define GPMC_IRQ_COUNT_EVENT 0x02
17
18#define GPMC_BURST_4 4 /* 4 word burst */
19#define GPMC_BURST_8 8 /* 8 word burst */
20#define GPMC_BURST_16 16 /* 16 word burst */
21#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */
22#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */
23#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
24#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
25
26/* bool type time settings */
27struct gpmc_bool_timings {
28 bool cycle2cyclediffcsen;
29 bool cycle2cyclesamecsen;
30 bool we_extra_delay;
31 bool oe_extra_delay;
32 bool adv_extra_delay;
33 bool cs_extra_delay;
34 bool time_para_granularity;
35};
36
37/*
38 * Note that all values in this struct are in nanoseconds except sync_clk
39 * (which is in picoseconds), while the register values are in gpmc_fck cycles.
40 */
41struct gpmc_timings {
42 /* Minimum clock period for synchronous mode (in picoseconds) */
43 u32 sync_clk;
44
45 /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
46 u32 cs_on; /* Assertion time */
47 u32 cs_rd_off; /* Read deassertion time */
48 u32 cs_wr_off; /* Write deassertion time */
49
50 /* ADV signal timings corresponding to GPMC_CONFIG3 */
51 u32 adv_on; /* Assertion time */
52 u32 adv_rd_off; /* Read deassertion time */
53 u32 adv_wr_off; /* Write deassertion time */
54
55 /* WE signals timings corresponding to GPMC_CONFIG4 */
56 u32 we_on; /* WE assertion time */
57 u32 we_off; /* WE deassertion time */
58
59 /* OE signals timings corresponding to GPMC_CONFIG4 */
60 u32 oe_on; /* OE assertion time */
61 u32 oe_off; /* OE deassertion time */
62
63 /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
64 u32 page_burst_access; /* Multiple access word delay */
65 u32 access; /* Start-cycle to first data valid delay */
66 u32 rd_cycle; /* Total read cycle time */
67 u32 wr_cycle; /* Total write cycle time */
68
69 u32 bus_turnaround;
70 u32 cycle2cycle_delay;
71
72 u32 wait_monitoring;
73 u32 clk_activation;
74
75 /* The following are only on OMAP3430 */
76 u32 wr_access; /* WRACCESSTIME */
77 u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
78
79 struct gpmc_bool_timings bool_timings;
80};
81
82/* Device timings in picoseconds */
83struct gpmc_device_timings {
84 u32 t_ceasu; /* address setup to CS valid */
85 u32 t_avdasu; /* address setup to ADV valid */
86 /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
87 * of tusb using these timings even for sync whilst
88 * ideally for adv_rd/(wr)_off it should have considered
89 * t_avdh instead. This indirectly necessitates r/w
90 * variations of t_avdp as it is possible to have one
91 * sync & other async
92 */
93 u32 t_avdp_r; /* ADV low time (what about t_cer ?) */
94 u32 t_avdp_w;
95 u32 t_aavdh; /* address hold time */
96 u32 t_oeasu; /* address setup to OE valid */
97 u32 t_aa; /* access time from ADV assertion */
98 u32 t_iaa; /* initial access time */
99 u32 t_oe; /* access time from OE assertion */
100 u32 t_ce; /* access time from CS asertion */
101 u32 t_rd_cycle; /* read cycle time */
102 u32 t_cez_r; /* read CS deassertion to high Z */
103 u32 t_cez_w; /* write CS deassertion to high Z */
104 u32 t_oez; /* OE deassertion to high Z */
105 u32 t_weasu; /* address setup to WE valid */
106 u32 t_wpl; /* write assertion time */
107 u32 t_wph; /* write deassertion time */
108 u32 t_wr_cycle; /* write cycle time */
109
110 u32 clk;
111 u32 t_bacc; /* burst access valid clock to output delay */
112 u32 t_ces; /* CS setup time to clk */
113 u32 t_avds; /* ADV setup time to clk */
114 u32 t_avdh; /* ADV hold time from clk */
115 u32 t_ach; /* address hold time from clk */
116 u32 t_rdyo; /* clk to ready valid */
117
118 u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */
119 u32 t_ce_avd; /* CS on to ADV on delay */
120
121 /* XXX: check the possibility of combining
122 * cyc_aavhd_oe & cyc_aavdh_we
123 */
124 u8 cyc_aavdh_oe;/* read address hold time in cycles */
125 u8 cyc_aavdh_we;/* write address hold time in cycles */
126 u8 cyc_oe; /* access time from OE assertion in cycles */
127 u8 cyc_wpl; /* write deassertion time in cycles */
128 u32 cyc_iaa; /* initial access time in cycles */
129
130 /* extra delays */
131 bool ce_xdelay;
132 bool avd_xdelay;
133 bool oe_xdelay;
134 bool we_xdelay;
135};
136
137struct gpmc_settings {
138 bool burst_wrap; /* enables wrap bursting */
139 bool burst_read; /* enables read page/burst mode */
140 bool burst_write; /* enables write page/burst mode */
141 bool device_nand; /* device is NAND */
142 bool sync_read; /* enables synchronous reads */
143 bool sync_write; /* enables synchronous writes */
144 bool wait_on_read; /* monitor wait on reads */
145 bool wait_on_write; /* monitor wait on writes */
146 u32 burst_len; /* page/burst length */
147 u32 device_width; /* device bus width (8 or 16 bit) */
148 u32 mux_add_data; /* multiplex address & data */
149 u32 wait_pin; /* wait-pin to be used */
150};
151
152extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
153 struct gpmc_settings *gpmc_s,
154 struct gpmc_device_timings *dev_t);
155
156struct gpmc_nand_regs;
157struct device_node;
158
159extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
160extern int gpmc_get_client_irq(unsigned irq_config);
161
162extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
163
164extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
165extern int gpmc_calc_divider(unsigned int sync_clk);
166extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t);
167extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
168extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
169extern void gpmc_cs_free(int cs);
170extern int gpmc_configure(int cmd, int wval);
171extern void gpmc_read_settings_dt(struct device_node *np,
172 struct gpmc_settings *p);
173
174extern void omap3_gpmc_save_context(void);
175extern void omap3_gpmc_restore_context(void);
176
177struct gpmc_timings;
178struct omap_nand_platform_data;
179struct omap_onenand_platform_data;
180
181#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
182extern int gpmc_nand_init(struct omap_nand_platform_data *d,
183 struct gpmc_timings *gpmc_t);
184#else
185static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
186 struct gpmc_timings *gpmc_t)
187{
188 return 0;
189}
190#endif
191
192#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
193extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
194#else
195#define board_onenand_data NULL
196static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
197{
198}
199#endif
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index f8322d9cd235..587bbdd31f5a 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -10,20 +10,20 @@
10#define OMAP_MAILBOX_H 10#define OMAP_MAILBOX_H
11 11
12typedef u32 mbox_msg_t; 12typedef u32 mbox_msg_t;
13struct omap_mbox;
14 13
15typedef int __bitwise omap_mbox_irq_t; 14typedef int __bitwise omap_mbox_irq_t;
16#define IRQ_TX ((__force omap_mbox_irq_t) 1) 15#define IRQ_TX ((__force omap_mbox_irq_t) 1)
17#define IRQ_RX ((__force omap_mbox_irq_t) 2) 16#define IRQ_RX ((__force omap_mbox_irq_t) 2)
18 17
19int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg); 18struct mbox_chan;
19struct mbox_client;
20 20
21struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb); 21struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
22void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb); 22 const char *chan_name);
23 23
24void omap_mbox_save_ctx(struct omap_mbox *mbox); 24void omap_mbox_save_ctx(struct mbox_chan *chan);
25void omap_mbox_restore_ctx(struct omap_mbox *mbox); 25void omap_mbox_restore_ctx(struct mbox_chan *chan);
26void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); 26void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
27void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); 27void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
28 28
29#endif /* OMAP_MAILBOX_H */ 29#endif /* OMAP_MAILBOX_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 647395a1a550..853698c721f7 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
50extern unsigned long oom_badness(struct task_struct *p, 50extern unsigned long oom_badness(struct task_struct *p,
51 struct mem_cgroup *memcg, const nodemask_t *nodemask, 51 struct mem_cgroup *memcg, const nodemask_t *nodemask,
52 unsigned long totalpages); 52 unsigned long totalpages);
53
54extern int oom_kills_count(void);
55extern void note_oom_kill(void);
53extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 56extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
54 unsigned int points, unsigned long totalpages, 57 unsigned int points, unsigned long totalpages,
55 struct mem_cgroup *memcg, nodemask_t *nodemask, 58 struct mem_cgroup *memcg, nodemask_t *nodemask,
@@ -89,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask)
89 92
90extern struct task_struct *find_lock_task_mm(struct task_struct *p); 93extern struct task_struct *find_lock_task_mm(struct task_struct *p);
91 94
95static inline bool task_will_free_mem(struct task_struct *task)
96{
97 /*
98 * A coredumping process may sleep for an extended period in exit_mm(),
99 * so the oom killer cannot assume that the process will promptly exit
100 * and release memory.
101 */
102 return (task->flags & PF_EXITING) &&
103 !(task->signal->flags & SIGNAL_GROUP_COREDUMP);
104}
105
92/* sysctls */ 106/* sysctls */
93extern int sysctl_oom_dump_tasks; 107extern int sysctl_oom_dump_tasks;
94extern int sysctl_oom_kill_allocating_task; 108extern int sysctl_oom_kill_allocating_task;
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
deleted file mode 100644
index 22691f614043..000000000000
--- a/include/linux/page-debug-flags.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef LINUX_PAGE_DEBUG_FLAGS_H
2#define LINUX_PAGE_DEBUG_FLAGS_H
3
4/*
5 * page->debug_flags bits:
6 *
7 * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
8 * implement generic debug pagealloc feature. The pages are filled with
9 * poison patterns and set this flag after free_pages(). The poisoned
10 * pages are verified whether the patterns are not corrupted and clear
11 * the flag before alloc_pages().
12 */
13
14enum page_debug_flags {
15 PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
16 PAGE_DEBUG_FLAG_GUARD,
17};
18
19/*
20 * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
21 * gets turned off when no debug features are enabling it!
22 */
23
24#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
25#if !defined(CONFIG_PAGE_POISONING) && \
26 !defined(CONFIG_PAGE_GUARD) \
27/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
28#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
29#endif
30#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
31
32#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3fff8e774067..2dc1e1697b45 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -2,6 +2,10 @@
2#define __LINUX_PAGEISOLATION_H 2#define __LINUX_PAGEISOLATION_H
3 3
4#ifdef CONFIG_MEMORY_ISOLATION 4#ifdef CONFIG_MEMORY_ISOLATION
5static inline bool has_isolate_pageblock(struct zone *zone)
6{
7 return zone->nr_isolate_pageblock;
8}
5static inline bool is_migrate_isolate_page(struct page *page) 9static inline bool is_migrate_isolate_page(struct page *page)
6{ 10{
7 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; 11 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
11 return migratetype == MIGRATE_ISOLATE; 15 return migratetype == MIGRATE_ISOLATE;
12} 16}
13#else 17#else
18static inline bool has_isolate_pageblock(struct zone *zone)
19{
20 return false;
21}
14static inline bool is_migrate_isolate_page(struct page *page) 22static inline bool is_migrate_isolate_page(struct page *page)
15{ 23{
16 return false; 24 return false;
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
deleted file mode 100644
index 5c831f1eca79..000000000000
--- a/include/linux/page_cgroup.h
+++ /dev/null
@@ -1,105 +0,0 @@
1#ifndef __LINUX_PAGE_CGROUP_H
2#define __LINUX_PAGE_CGROUP_H
3
4enum {
5 /* flags for mem_cgroup */
6 PCG_USED = 0x01, /* This page is charged to a memcg */
7 PCG_MEM = 0x02, /* This page holds a memory charge */
8 PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */
9};
10
11struct pglist_data;
12
13#ifdef CONFIG_MEMCG
14struct mem_cgroup;
15
16/*
17 * Page Cgroup can be considered as an extended mem_map.
18 * A page_cgroup page is associated with every page descriptor. The
19 * page_cgroup helps us identify information about the cgroup
20 * All page cgroups are allocated at boot or memory hotplug event,
21 * then the page cgroup for pfn always exists.
22 */
23struct page_cgroup {
24 unsigned long flags;
25 struct mem_cgroup *mem_cgroup;
26};
27
28extern void pgdat_page_cgroup_init(struct pglist_data *pgdat);
29
30#ifdef CONFIG_SPARSEMEM
31static inline void page_cgroup_init_flatmem(void)
32{
33}
34extern void page_cgroup_init(void);
35#else
36extern void page_cgroup_init_flatmem(void);
37static inline void page_cgroup_init(void)
38{
39}
40#endif
41
42struct page_cgroup *lookup_page_cgroup(struct page *page);
43
44static inline int PageCgroupUsed(struct page_cgroup *pc)
45{
46 return !!(pc->flags & PCG_USED);
47}
48#else /* !CONFIG_MEMCG */
49struct page_cgroup;
50
51static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat)
52{
53}
54
55static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
56{
57 return NULL;
58}
59
60static inline void page_cgroup_init(void)
61{
62}
63
64static inline void page_cgroup_init_flatmem(void)
65{
66}
67#endif /* CONFIG_MEMCG */
68
69#include <linux/swap.h>
70
71#ifdef CONFIG_MEMCG_SWAP
72extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
73 unsigned short old, unsigned short new);
74extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
75extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
76extern int swap_cgroup_swapon(int type, unsigned long max_pages);
77extern void swap_cgroup_swapoff(int type);
78#else
79
80static inline
81unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
82{
83 return 0;
84}
85
86static inline
87unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
88{
89 return 0;
90}
91
92static inline int
93swap_cgroup_swapon(int type, unsigned long max_pages)
94{
95 return 0;
96}
97
98static inline void swap_cgroup_swapoff(int type)
99{
100 return;
101}
102
103#endif /* CONFIG_MEMCG_SWAP */
104
105#endif /* __LINUX_PAGE_CGROUP_H */
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
new file mode 100644
index 000000000000..955421575d16
--- /dev/null
+++ b/include/linux/page_counter.h
@@ -0,0 +1,51 @@
1#ifndef _LINUX_PAGE_COUNTER_H
2#define _LINUX_PAGE_COUNTER_H
3
4#include <linux/atomic.h>
5#include <linux/kernel.h>
6#include <asm/page.h>
7
8struct page_counter {
9 atomic_long_t count;
10 unsigned long limit;
11 struct page_counter *parent;
12
13 /* legacy */
14 unsigned long watermark;
15 unsigned long failcnt;
16};
17
18#if BITS_PER_LONG == 32
19#define PAGE_COUNTER_MAX LONG_MAX
20#else
21#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
22#endif
23
24static inline void page_counter_init(struct page_counter *counter,
25 struct page_counter *parent)
26{
27 atomic_long_set(&counter->count, 0);
28 counter->limit = PAGE_COUNTER_MAX;
29 counter->parent = parent;
30}
31
32static inline unsigned long page_counter_read(struct page_counter *counter)
33{
34 return atomic_long_read(&counter->count);
35}
36
37void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
38void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
39int page_counter_try_charge(struct page_counter *counter,
40 unsigned long nr_pages,
41 struct page_counter **fail);
42void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
43int page_counter_limit(struct page_counter *counter, unsigned long limit);
44int page_counter_memparse(const char *buf, unsigned long *nr_pages);
45
46static inline void page_counter_reset_watermark(struct page_counter *counter)
47{
48 counter->watermark = page_counter_read(counter);
49}
50
51#endif /* _LINUX_PAGE_COUNTER_H */
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
new file mode 100644
index 000000000000..d2a2c84c72d0
--- /dev/null
+++ b/include/linux/page_ext.h
@@ -0,0 +1,84 @@
1#ifndef __LINUX_PAGE_EXT_H
2#define __LINUX_PAGE_EXT_H
3
4#include <linux/types.h>
5#include <linux/stacktrace.h>
6
7struct pglist_data;
8struct page_ext_operations {
9 bool (*need)(void);
10 void (*init)(void);
11};
12
13#ifdef CONFIG_PAGE_EXTENSION
14
15/*
16 * page_ext->flags bits:
17 *
18 * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
19 * implement generic debug pagealloc feature. The pages are filled with
20 * poison patterns and set this flag after free_pages(). The poisoned
21 * pages are verified whether the patterns are not corrupted and clear
22 * the flag before alloc_pages().
23 */
24
25enum page_ext_flags {
26 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
27 PAGE_EXT_DEBUG_GUARD,
28 PAGE_EXT_OWNER,
29};
30
31/*
32 * Page Extension can be considered as an extended mem_map.
33 * A page_ext page is associated with every page descriptor. The
34 * page_ext helps us add more information about the page.
35 * All page_ext are allocated at boot or memory hotplug event,
36 * then the page_ext for pfn always exists.
37 */
38struct page_ext {
39 unsigned long flags;
40#ifdef CONFIG_PAGE_OWNER
41 unsigned int order;
42 gfp_t gfp_mask;
43 struct stack_trace trace;
44 unsigned long trace_entries[8];
45#endif
46};
47
48extern void pgdat_page_ext_init(struct pglist_data *pgdat);
49
50#ifdef CONFIG_SPARSEMEM
51static inline void page_ext_init_flatmem(void)
52{
53}
54extern void page_ext_init(void);
55#else
56extern void page_ext_init_flatmem(void);
57static inline void page_ext_init(void)
58{
59}
60#endif
61
62struct page_ext *lookup_page_ext(struct page *page);
63
64#else /* !CONFIG_PAGE_EXTENSION */
65struct page_ext;
66
67static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
68{
69}
70
71static inline struct page_ext *lookup_page_ext(struct page *page)
72{
73 return NULL;
74}
75
76static inline void page_ext_init(void)
77{
78}
79
80static inline void page_ext_init_flatmem(void)
81{
82}
83#endif /* CONFIG_PAGE_EXTENSION */
84#endif /* __LINUX_PAGE_EXT_H */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
new file mode 100644
index 000000000000..b48c3471c254
--- /dev/null
+++ b/include/linux/page_owner.h
@@ -0,0 +1,38 @@
1#ifndef __LINUX_PAGE_OWNER_H
2#define __LINUX_PAGE_OWNER_H
3
4#ifdef CONFIG_PAGE_OWNER
5extern bool page_owner_inited;
6extern struct page_ext_operations page_owner_ops;
7
8extern void __reset_page_owner(struct page *page, unsigned int order);
9extern void __set_page_owner(struct page *page,
10 unsigned int order, gfp_t gfp_mask);
11
12static inline void reset_page_owner(struct page *page, unsigned int order)
13{
14 if (likely(!page_owner_inited))
15 return;
16
17 __reset_page_owner(page, order);
18}
19
20static inline void set_page_owner(struct page *page,
21 unsigned int order, gfp_t gfp_mask)
22{
23 if (likely(!page_owner_inited))
24 return;
25
26 __set_page_owner(page, order, gfp_mask);
27}
28#else
29static inline void reset_page_owner(struct page *page, unsigned int order)
30{
31}
32static inline void set_page_owner(struct page *page,
33 unsigned int order, gfp_t gfp_mask)
34{
35}
36
37#endif /* CONFIG_PAGE_OWNER */
38#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 64dacb7288a6..24c7728ca681 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -41,8 +41,13 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
41 41
42 if (pci_is_root_bus(pbus)) 42 if (pci_is_root_bus(pbus))
43 dev = pbus->bridge; 43 dev = pbus->bridge;
44 else 44 else {
45 /* If pbus is a virtual bus, there is no bridge to it */
46 if (!pbus->self)
47 return NULL;
48
45 dev = &pbus->self->dev; 49 dev = &pbus->self->dev;
50 }
46 51
47 return ACPI_HANDLE(dev); 52 return ACPI_HANDLE(dev);
48} 53}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5be8db45e368..44a27696ab6c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -331,6 +331,7 @@ struct pci_dev {
331 unsigned int is_added:1; 331 unsigned int is_added:1;
332 unsigned int is_busmaster:1; /* device is busmaster */ 332 unsigned int is_busmaster:1; /* device is busmaster */
333 unsigned int no_msi:1; /* device may not use msi */ 333 unsigned int no_msi:1; /* device may not use msi */
334 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
334 unsigned int block_cfg_access:1; /* config space access is blocked */ 335 unsigned int block_cfg_access:1; /* config space access is blocked */
335 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 336 unsigned int broken_parity_status:1; /* Device generates false positive parity */
336 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 337 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
@@ -449,7 +450,7 @@ struct pci_bus {
449 struct resource busn_res; /* bus numbers routed to this bus */ 450 struct resource busn_res; /* bus numbers routed to this bus */
450 451
451 struct pci_ops *ops; /* configuration access functions */ 452 struct pci_ops *ops; /* configuration access functions */
452 struct msi_chip *msi; /* MSI controller */ 453 struct msi_controller *msi; /* MSI controller */
453 void *sysdata; /* hook for sys-specific extension */ 454 void *sysdata; /* hook for sys-specific extension */
454 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ 455 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
455 456
@@ -1003,6 +1004,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
1003int pci_save_state(struct pci_dev *dev); 1004int pci_save_state(struct pci_dev *dev);
1004void pci_restore_state(struct pci_dev *dev); 1005void pci_restore_state(struct pci_dev *dev);
1005struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); 1006struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1007int pci_load_saved_state(struct pci_dev *dev,
1008 struct pci_saved_state *state);
1006int pci_load_and_free_saved_state(struct pci_dev *dev, 1009int pci_load_and_free_saved_state(struct pci_dev *dev,
1007 struct pci_saved_state **state); 1010 struct pci_saved_state **state);
1008struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); 1011struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 2706ee9a4327..8c7895061121 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -109,7 +109,6 @@ struct hotplug_slot {
109 struct list_head slot_list; 109 struct list_head slot_list;
110 struct pci_slot *pci_slot; 110 struct pci_slot *pci_slot;
111}; 111};
112#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
113 112
114static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) 113static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
115{ 114{
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1fa99a301817..97fb9f69aaed 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -522,6 +522,8 @@
522#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 522#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
523#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d 523#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
524#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e 524#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
525#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573
526#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574
525#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 527#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
526#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 528#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
527#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 529#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 420032d41d27..57f3a1c550dc 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -254,8 +254,6 @@ do { \
254#endif /* CONFIG_SMP */ 254#endif /* CONFIG_SMP */
255 255
256#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) 256#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
257#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
258#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
259 257
260/* 258/*
261 * Must be an lvalue. Since @var must be a simple identifier, 259 * Must be an lvalue. Since @var must be a simple identifier,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d5c89e0dd0e6..b4337646388b 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,12 +128,16 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
128static inline bool __ref_is_percpu(struct percpu_ref *ref, 128static inline bool __ref_is_percpu(struct percpu_ref *ref,
129 unsigned long __percpu **percpu_countp) 129 unsigned long __percpu **percpu_countp)
130{ 130{
131 unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
132
133 /* paired with smp_store_release() in percpu_ref_reinit() */ 131 /* paired with smp_store_release() in percpu_ref_reinit() */
134 smp_read_barrier_depends(); 132 unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
135 133
136 if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) 134 /*
135 * Theoretically, the following could test just ATOMIC; however,
136 * then we'd have to mask off DEAD separately as DEAD may be
137 * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
138 * implies ATOMIC anyway. Test them together.
139 */
140 if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
137 return false; 141 return false;
138 142
139 *percpu_countp = (unsigned long __percpu *)percpu_ptr; 143 *percpu_countp = (unsigned long __percpu *)percpu_ptr;
@@ -141,28 +145,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
141} 145}
142 146
143/** 147/**
144 * percpu_ref_get - increment a percpu refcount 148 * percpu_ref_get_many - increment a percpu refcount
145 * @ref: percpu_ref to get 149 * @ref: percpu_ref to get
150 * @nr: number of references to get
146 * 151 *
147 * Analagous to atomic_long_inc(). 152 * Analogous to atomic_long_add().
148 * 153 *
149 * This function is safe to call as long as @ref is between init and exit. 154 * This function is safe to call as long as @ref is between init and exit.
150 */ 155 */
151static inline void percpu_ref_get(struct percpu_ref *ref) 156static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
152{ 157{
153 unsigned long __percpu *percpu_count; 158 unsigned long __percpu *percpu_count;
154 159
155 rcu_read_lock_sched(); 160 rcu_read_lock_sched();
156 161
157 if (__ref_is_percpu(ref, &percpu_count)) 162 if (__ref_is_percpu(ref, &percpu_count))
158 this_cpu_inc(*percpu_count); 163 this_cpu_add(*percpu_count, nr);
159 else 164 else
160 atomic_long_inc(&ref->count); 165 atomic_long_add(nr, &ref->count);
161 166
162 rcu_read_unlock_sched(); 167 rcu_read_unlock_sched();
163} 168}
164 169
165/** 170/**
171 * percpu_ref_get - increment a percpu refcount
172 * @ref: percpu_ref to get
173 *
174 * Analagous to atomic_long_inc().
175 *
176 * This function is safe to call as long as @ref is between init and exit.
177 */
178static inline void percpu_ref_get(struct percpu_ref *ref)
179{
180 percpu_ref_get_many(ref, 1);
181}
182
183/**
166 * percpu_ref_tryget - try to increment a percpu refcount 184 * percpu_ref_tryget - try to increment a percpu refcount
167 * @ref: percpu_ref to try-get 185 * @ref: percpu_ref to try-get
168 * 186 *
@@ -225,29 +243,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
225} 243}
226 244
227/** 245/**
228 * percpu_ref_put - decrement a percpu refcount 246 * percpu_ref_put_many - decrement a percpu refcount
229 * @ref: percpu_ref to put 247 * @ref: percpu_ref to put
248 * @nr: number of references to put
230 * 249 *
231 * Decrement the refcount, and if 0, call the release function (which was passed 250 * Decrement the refcount, and if 0, call the release function (which was passed
232 * to percpu_ref_init()) 251 * to percpu_ref_init())
233 * 252 *
234 * This function is safe to call as long as @ref is between init and exit. 253 * This function is safe to call as long as @ref is between init and exit.
235 */ 254 */
236static inline void percpu_ref_put(struct percpu_ref *ref) 255static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
237{ 256{
238 unsigned long __percpu *percpu_count; 257 unsigned long __percpu *percpu_count;
239 258
240 rcu_read_lock_sched(); 259 rcu_read_lock_sched();
241 260
242 if (__ref_is_percpu(ref, &percpu_count)) 261 if (__ref_is_percpu(ref, &percpu_count))
243 this_cpu_dec(*percpu_count); 262 this_cpu_sub(*percpu_count, nr);
244 else if (unlikely(atomic_long_dec_and_test(&ref->count))) 263 else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
245 ref->release(ref); 264 ref->release(ref);
246 265
247 rcu_read_unlock_sched(); 266 rcu_read_unlock_sched();
248} 267}
249 268
250/** 269/**
270 * percpu_ref_put - decrement a percpu refcount
271 * @ref: percpu_ref to put
272 *
273 * Decrement the refcount, and if 0, call the release function (which was passed
274 * to percpu_ref_init())
275 *
276 * This function is safe to call as long as @ref is between init and exit.
277 */
278static inline void percpu_ref_put(struct percpu_ref *ref)
279{
280 percpu_ref_put_many(ref, 1);
281}
282
283/**
251 * percpu_ref_is_zero - test whether a percpu refcount reached zero 284 * percpu_ref_is_zero - test whether a percpu refcount reached zero
252 * @ref: percpu_ref to test 285 * @ref: percpu_ref to test
253 * 286 *
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 893a0d07986f..486e84ccb1f9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,7 +79,7 @@ struct perf_branch_stack {
79 struct perf_branch_entry entries[0]; 79 struct perf_branch_entry entries[0];
80}; 80};
81 81
82struct perf_regs_user { 82struct perf_regs {
83 __u64 abi; 83 __u64 abi;
84 struct pt_regs *regs; 84 struct pt_regs *regs;
85}; 85};
@@ -580,34 +580,40 @@ extern u64 perf_event_read_value(struct perf_event *event,
580 580
581 581
582struct perf_sample_data { 582struct perf_sample_data {
583 u64 type; 583 /*
584 * Fields set by perf_sample_data_init(), group so as to
585 * minimize the cachelines touched.
586 */
587 u64 addr;
588 struct perf_raw_record *raw;
589 struct perf_branch_stack *br_stack;
590 u64 period;
591 u64 weight;
592 u64 txn;
593 union perf_mem_data_src data_src;
584 594
595 /*
596 * The other fields, optionally {set,used} by
597 * perf_{prepare,output}_sample().
598 */
599 u64 type;
585 u64 ip; 600 u64 ip;
586 struct { 601 struct {
587 u32 pid; 602 u32 pid;
588 u32 tid; 603 u32 tid;
589 } tid_entry; 604 } tid_entry;
590 u64 time; 605 u64 time;
591 u64 addr;
592 u64 id; 606 u64 id;
593 u64 stream_id; 607 u64 stream_id;
594 struct { 608 struct {
595 u32 cpu; 609 u32 cpu;
596 u32 reserved; 610 u32 reserved;
597 } cpu_entry; 611 } cpu_entry;
598 u64 period;
599 union perf_mem_data_src data_src;
600 struct perf_callchain_entry *callchain; 612 struct perf_callchain_entry *callchain;
601 struct perf_raw_record *raw; 613 struct perf_regs regs_user;
602 struct perf_branch_stack *br_stack; 614 struct perf_regs regs_intr;
603 struct perf_regs_user regs_user;
604 u64 stack_user_size; 615 u64 stack_user_size;
605 u64 weight; 616} ____cacheline_aligned;
606 /*
607 * Transaction flags for abort events:
608 */
609 u64 txn;
610};
611 617
612/* default value for data source */ 618/* default value for data source */
613#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ 619#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
@@ -624,9 +630,6 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
624 data->raw = NULL; 630 data->raw = NULL;
625 data->br_stack = NULL; 631 data->br_stack = NULL;
626 data->period = period; 632 data->period = period;
627 data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
628 data->regs_user.regs = NULL;
629 data->stack_user_size = 0;
630 data->weight = 0; 633 data->weight = 0;
631 data->data_src.val = PERF_MEM_NA; 634 data->data_src.val = PERF_MEM_NA;
632 data->txn = 0; 635 data->txn = 0;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d090cfcaa167..22af8f8f5802 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -433,6 +433,7 @@ struct phy_device {
433 * by this PHY 433 * by this PHY
434 * flags: A bitfield defining certain other features this PHY 434 * flags: A bitfield defining certain other features this PHY
435 * supports (like interrupts) 435 * supports (like interrupts)
436 * driver_data: static driver data
436 * 437 *
437 * The drivers must implement config_aneg and read_status. All 438 * The drivers must implement config_aneg and read_status. All
438 * other functions are optional. Note that none of these 439 * other functions are optional. Note that none of these
@@ -448,6 +449,7 @@ struct phy_driver {
448 unsigned int phy_id_mask; 449 unsigned int phy_id_mask;
449 u32 features; 450 u32 features;
450 u32 flags; 451 u32 flags;
452 const void *driver_data;
451 453
452 /* 454 /*
453 * Called to issue a PHY software reset 455 * Called to issue a PHY software reset
@@ -772,4 +774,28 @@ int __init mdio_bus_init(void);
772void mdio_bus_exit(void); 774void mdio_bus_exit(void);
773 775
774extern struct bus_type mdio_bus_type; 776extern struct bus_type mdio_bus_type;
777
778/**
779 * module_phy_driver() - Helper macro for registering PHY drivers
780 * @__phy_drivers: array of PHY drivers to register
781 *
782 * Helper macro for PHY drivers which do not do anything special in module
783 * init/exit. Each module may only use this macro once, and calling it
784 * replaces module_init() and module_exit().
785 */
786#define phy_module_driver(__phy_drivers, __count) \
787static int __init phy_module_init(void) \
788{ \
789 return phy_drivers_register(__phy_drivers, __count); \
790} \
791module_init(phy_module_init); \
792static void __exit phy_module_exit(void) \
793{ \
794 phy_drivers_unregister(__phy_drivers, __count); \
795} \
796module_exit(phy_module_exit)
797
798#define module_phy_driver(__phy_drivers) \
799 phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
800
775#endif /* __PHY_H */ 801#endif /* __PHY_H */
diff --git a/include/linux/mailbox.h b/include/linux/pl320-ipc.h
index 5161f63ec1c8..5161f63ec1c8 100644
--- a/include/linux/mailbox.h
+++ b/include/linux/pl320-ipc.h
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index a6591c693ebb..5e0bc779e6c5 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -27,6 +27,7 @@ struct samsung_i2s {
27#define QUIRK_NO_MUXPSR (1 << 2) 27#define QUIRK_NO_MUXPSR (1 << 2)
28#define QUIRK_NEED_RSTCLR (1 << 3) 28#define QUIRK_NEED_RSTCLR (1 << 3)
29#define QUIRK_SUPPORTS_TDM (1 << 4) 29#define QUIRK_SUPPORTS_TDM (1 << 4)
30#define QUIRK_SUPPORTS_IDMA (1 << 5)
30 /* Quirks of the I2S controller */ 31 /* Quirks of the I2S controller */
31 u32 quirks; 32 u32 quirks;
32 dma_addr_t idma_addr; 33 dma_addr_t idma_addr;
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
new file mode 100644
index 000000000000..26af54321958
--- /dev/null
+++ b/include/linux/platform_data/bcmgenet.h
@@ -0,0 +1,18 @@
1#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
2#define __LINUX_PLATFORM_DATA_BCMGENET_H__
3
4#include <linux/types.h>
5#include <linux/if_ether.h>
6#include <linux/phy.h>
7
8struct bcmgenet_platform_data {
9 bool mdio_enabled;
10 phy_interface_t phy_interface;
11 int phy_address;
12 int phy_speed;
13 int phy_duplex;
14 u8 mac_address[ETH_ALEN];
15 int genet_version;
16};
17
18#endif
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 6a1357d31871..7d964e787299 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -41,6 +41,7 @@ enum sdma_peripheral_type {
41 IMX_DMATYPE_ESAI, /* ESAI */ 41 IMX_DMATYPE_ESAI, /* ESAI */
42 IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ 42 IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
43 IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ 43 IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
44 IMX_DMATYPE_SAI, /* SAI */
44}; 45};
45 46
46enum imx_dma_prio { 47enum imx_dma_prio {
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
new file mode 100644
index 000000000000..67bbcf0785f6
--- /dev/null
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -0,0 +1,90 @@
1/*
2 * MMC definitions for OMAP2
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * struct omap_hsmmc_dev_attr.flags possibilities
13 *
14 * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can
15 * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag
16 * should be set if this is the case. See for example Section 22.5.3
17 * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia
18 * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R).
19 *
20 * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers
21 * don't work correctly on some MMC controller instances on some
22 * OMAP3 SoCs; this flag should be set if this is the case. See
23 * for example Advisory 2.1.1.128 "MMC: Multiple Block Read
24 * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_
25 * Revision F (October 2010) (SPRZ278F).
26 */
27#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
28#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
29#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2)
30
31struct omap_hsmmc_dev_attr {
32 u8 flags;
33};
34
35struct mmc_card;
36
37struct omap_hsmmc_platform_data {
38 /* back-link to device */
39 struct device *dev;
40
41 /* set if your board has components or wiring that limits the
42 * maximum frequency on the MMC bus */
43 unsigned int max_freq;
44
45 /* Integrating attributes from the omap_hwmod layer */
46 u8 controller_flags;
47
48 /* Register offset deviation */
49 u16 reg_offset;
50
51 /*
52 * 4/8 wires and any additional host capabilities
53 * need to OR'd all capabilities (ref. linux/mmc/host.h)
54 */
55 u32 caps; /* Used for the MMC driver on 2430 and later */
56 u32 pm_caps; /* PM capabilities of the mmc */
57
58 /* switch pin can be for card detect (default) or card cover */
59 unsigned cover:1;
60
61 /* use the internal clock */
62 unsigned internal_clock:1;
63
64 /* nonremovable e.g. eMMC */
65 unsigned nonremovable:1;
66
67 /* eMMC does not handle power off when not in sleep state */
68 unsigned no_regulator_off_init:1;
69
70 /* we can put the features above into this variable */
71#define HSMMC_HAS_PBIAS (1 << 0)
72#define HSMMC_HAS_UPDATED_RESET (1 << 1)
73#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
74 unsigned features;
75
76 int switch_pin; /* gpio (card detect) */
77 int gpio_wp; /* gpio (write protect) */
78
79 int (*set_power)(struct device *dev, int power_on, int vdd);
80 void (*remux)(struct device *dev, int power_on);
81 /* Call back before enabling / disabling regulators */
82 void (*before_set_reg)(struct device *dev, int power_on, int vdd);
83 /* Call back after enabling / disabling regulators */
84 void (*after_set_reg)(struct device *dev, int power_on, int vdd);
85 /* if we have special card, init it using this callback */
86 void (*init_card)(struct mmc_card *card);
87
88 const char *name;
89 u32 ocr_mask;
90};
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 1b2ba24e4e03..9c7fd1efe495 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,6 +136,7 @@ struct lp855x_rom_data {
136 Only valid when mode is PWM_BASED. 136 Only valid when mode is PWM_BASED.
137 * @size_program : total size of lp855x_rom_data 137 * @size_program : total size of lp855x_rom_data
138 * @rom_data : list of new eeprom/eprom registers 138 * @rom_data : list of new eeprom/eprom registers
139 * @supply : regulator that supplies 3V input
139 */ 140 */
140struct lp855x_platform_data { 141struct lp855x_platform_data {
141 const char *name; 142 const char *name;
@@ -144,6 +145,7 @@ struct lp855x_platform_data {
144 unsigned int period_ns; 145 unsigned int period_ns;
145 int size_program; 146 int size_program;
146 struct lp855x_rom_data *rom_data; 147 struct lp855x_rom_data *rom_data;
148 struct regulator *supply;
147}; 149};
148 150
149#endif 151#endif
diff --git a/include/linux/platform_data/mmc-atmel-mci.h b/include/linux/platform_data/mmc-atmel-mci.h
new file mode 100644
index 000000000000..399a2d5a14bd
--- /dev/null
+++ b/include/linux/platform_data/mmc-atmel-mci.h
@@ -0,0 +1,22 @@
1#ifndef __MMC_ATMEL_MCI_H
2#define __MMC_ATMEL_MCI_H
3
4#include <linux/platform_data/dma-atmel.h>
5#include <linux/platform_data/dma-dw.h>
6
7/**
8 * struct mci_dma_data - DMA data for MCI interface
9 */
10struct mci_dma_data {
11#ifdef CONFIG_ARM
12 struct at_dma_slave sdata;
13#else
14 struct dw_dma_slave sdata;
15#endif
16};
17
18/* accessor macros */
19#define slave_data_ptr(s) (&(s)->sdata)
20#define find_slave_dev(s) ((s)->sdata.dma_dev)
21
22#endif /* __MMC_ATMEL_MCI_H */
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 51e70cf25cbc..5c188f4e9bec 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -10,32 +10,8 @@
10 10
11#define OMAP_MMC_MAX_SLOTS 2 11#define OMAP_MMC_MAX_SLOTS 2
12 12
13/*
14 * struct omap_mmc_dev_attr.flags possibilities
15 *
16 * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can
17 * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag
18 * should be set if this is the case. See for example Section 22.5.3
19 * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia
20 * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R).
21 *
22 * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers
23 * don't work correctly on some MMC controller instances on some
24 * OMAP3 SoCs; this flag should be set if this is the case. See
25 * for example Advisory 2.1.1.128 "MMC: Multiple Block Read
26 * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_
27 * Revision F (October 2010) (SPRZ278F).
28 */
29#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
30#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
31#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2)
32
33struct mmc_card; 13struct mmc_card;
34 14
35struct omap_mmc_dev_attr {
36 u8 flags;
37};
38
39struct omap_mmc_platform_data { 15struct omap_mmc_platform_data {
40 /* back-link to device */ 16 /* back-link to device */
41 struct device *dev; 17 struct device *dev;
@@ -106,9 +82,6 @@ struct omap_mmc_platform_data {
106 unsigned vcc_aux_disable_is_sleep:1; 82 unsigned vcc_aux_disable_is_sleep:1;
107 83
108 /* we can put the features above into this variable */ 84 /* we can put the features above into this variable */
109#define HSMMC_HAS_PBIAS (1 << 0)
110#define HSMMC_HAS_UPDATED_RESET (1 << 1)
111#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
112#define MMC_OMAP7XX (1 << 3) 85#define MMC_OMAP7XX (1 << 3)
113#define MMC_OMAP15XX (1 << 4) 86#define MMC_OMAP15XX (1 << 4)
114#define MMC_OMAP16XX (1 << 5) 87#define MMC_OMAP16XX (1 << 5)
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 27d3156d093a..9e20c2fb4ffd 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -55,9 +55,4 @@ struct sdhci_pxa_platdata {
55 unsigned int quirks2; 55 unsigned int quirks2;
56 unsigned int pm_caps; 56 unsigned int pm_caps;
57}; 57};
58
59struct sdhci_pxa {
60 u8 clk_enable;
61 u8 power_mode;
62};
63#endif /* _PXA_SDHCI_H_ */ 58#endif /* _PXA_SDHCI_H_ */
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
index c860c1b314c0..d09275f3cde3 100644
--- a/include/linux/platform_data/serial-omap.h
+++ b/include/linux/platform_data/serial-omap.h
@@ -38,9 +38,6 @@ struct omap_uart_port_info {
38 unsigned int dma_rx_timeout; 38 unsigned int dma_rx_timeout;
39 unsigned int autosuspend_timeout; 39 unsigned int autosuspend_timeout;
40 unsigned int dma_rx_poll_rate; 40 unsigned int dma_rx_poll_rate;
41 int DTR_gpio;
42 int DTR_inverted;
43 int DTR_present;
44 41
45 int (*get_context_loss_count)(struct device *); 42 int (*get_context_loss_count)(struct device *);
46 void (*enable_wakeup)(struct device *, bool); 43 void (*enable_wakeup)(struct device *, bool);
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 1730312398ff..5087fff96d86 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -24,7 +24,6 @@
24#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" 24#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
25 25
26struct st21nfca_nfc_platform_data { 26struct st21nfca_nfc_platform_data {
27 unsigned int gpio_irq;
28 unsigned int gpio_ena; 27 unsigned int gpio_ena;
29 unsigned int irq_polarity; 28 unsigned int irq_polarity;
30}; 29};
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
index 2d11f1f5efab..c3b432f5b63e 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st21nfcb.h
@@ -24,7 +24,6 @@
24#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" 24#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
25 25
26struct st21nfcb_nfc_platform_data { 26struct st21nfcb_nfc_platform_data {
27 unsigned int gpio_irq;
28 unsigned int gpio_reset; 27 unsigned int gpio_reset;
29 unsigned int irq_polarity; 28 unsigned int irq_polarity;
30}; 29};
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 8b6c970cff6c..97883604a3c5 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
176 * plist_for_each_entry - iterate over list of given type 176 * plist_for_each_entry - iterate over list of given type
177 * @pos: the type * to use as a loop counter 177 * @pos: the type * to use as a loop counter
178 * @head: the head for your list 178 * @head: the head for your list
179 * @mem: the name of the list_struct within the struct 179 * @mem: the name of the list_head within the struct
180 */ 180 */
181#define plist_for_each_entry(pos, head, mem) \ 181#define plist_for_each_entry(pos, head, mem) \
182 list_for_each_entry(pos, &(head)->node_list, mem.node_list) 182 list_for_each_entry(pos, &(head)->node_list, mem.node_list)
@@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
185 * plist_for_each_entry_continue - continue iteration over list of given type 185 * plist_for_each_entry_continue - continue iteration over list of given type
186 * @pos: the type * to use as a loop cursor 186 * @pos: the type * to use as a loop cursor
187 * @head: the head for your list 187 * @head: the head for your list
188 * @m: the name of the list_struct within the struct 188 * @m: the name of the list_head within the struct
189 * 189 *
190 * Continue to iterate over list of given type, continuing after 190 * Continue to iterate over list of given type, continuing after
191 * the current position. 191 * the current position.
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
198 * @pos: the type * to use as a loop counter 198 * @pos: the type * to use as a loop counter
199 * @n: another type * to use as temporary storage 199 * @n: another type * to use as temporary storage
200 * @head: the head for your list 200 * @head: the head for your list
201 * @m: the name of the list_struct within the struct 201 * @m: the name of the list_head within the struct
202 * 202 *
203 * Iterate over list of given type, safe against removal of list entry. 203 * Iterate over list of given type, safe against removal of list entry.
204 */ 204 */
@@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node)
229 * plist_first_entry - get the struct for the first entry 229 * plist_first_entry - get the struct for the first entry
230 * @head: the &struct plist_head pointer 230 * @head: the &struct plist_head pointer
231 * @type: the type of the struct this is embedded in 231 * @type: the type of the struct this is embedded in
232 * @member: the name of the list_struct within the struct 232 * @member: the name of the list_head within the struct
233 */ 233 */
234#ifdef CONFIG_DEBUG_PI_LIST 234#ifdef CONFIG_DEBUG_PI_LIST
235# define plist_first_entry(head, type, member) \ 235# define plist_first_entry(head, type, member) \
@@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node)
246 * plist_last_entry - get the struct for the last entry 246 * plist_last_entry - get the struct for the last entry
247 * @head: the &struct plist_head pointer 247 * @head: the &struct plist_head pointer
248 * @type: the type of the struct this is embedded in 248 * @type: the type of the struct this is embedded in
249 * @member: the name of the list_struct within the struct 249 * @member: the name of the list_head within the struct
250 */ 250 */
251#ifdef CONFIG_DEBUG_PI_LIST 251#ifdef CONFIG_DEBUG_PI_LIST
252# define plist_last_entry(head, type, member) \ 252# define plist_last_entry(head, type, member) \
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 383fd68aaee1..66a656eb335b 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,7 +342,7 @@ struct dev_pm_ops {
342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
343#endif 343#endif
344 344
345#ifdef CONFIG_PM_RUNTIME 345#ifdef CONFIG_PM
346#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 346#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
347 .runtime_suspend = suspend_fn, \ 347 .runtime_suspend = suspend_fn, \
348 .runtime_resume = resume_fn, \ 348 .runtime_resume = resume_fn, \
@@ -351,14 +351,7 @@ struct dev_pm_ops {
351#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) 351#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
352#endif 352#endif
353 353
354#ifdef CONFIG_PM 354#define SET_PM_RUNTIME_PM_OPS SET_RUNTIME_PM_OPS
355#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
356 .runtime_suspend = suspend_fn, \
357 .runtime_resume = resume_fn, \
358 .runtime_idle = idle_fn,
359#else
360#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
361#endif
362 355
363/* 356/*
364 * Use this if you want to use the same suspend and resume callbacks for suspend 357 * Use this if you want to use the same suspend and resume callbacks for suspend
@@ -538,11 +531,7 @@ enum rpm_request {
538}; 531};
539 532
540struct wakeup_source; 533struct wakeup_source;
541 534struct pm_domain_data;
542struct pm_domain_data {
543 struct list_head list_node;
544 struct device *dev;
545};
546 535
547struct pm_subsys_data { 536struct pm_subsys_data {
548 spinlock_t lock; 537 spinlock_t lock;
@@ -576,7 +565,7 @@ struct dev_pm_info {
576#else 565#else
577 unsigned int should_wakeup:1; 566 unsigned int should_wakeup:1;
578#endif 567#endif
579#ifdef CONFIG_PM_RUNTIME 568#ifdef CONFIG_PM
580 struct timer_list suspend_timer; 569 struct timer_list suspend_timer;
581 unsigned long timer_expires; 570 unsigned long timer_expires;
582 struct work_struct work; 571 struct work_struct work;
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 8348866e7b05..0b0039634410 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -18,6 +18,8 @@ struct pm_clk_notifier_block {
18 char *con_ids[]; 18 char *con_ids[];
19}; 19};
20 20
21struct clk;
22
21#ifdef CONFIG_PM_CLK 23#ifdef CONFIG_PM_CLK
22static inline bool pm_clk_no_clocks(struct device *dev) 24static inline bool pm_clk_no_clocks(struct device *dev)
23{ 25{
@@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev);
29extern int pm_clk_create(struct device *dev); 31extern int pm_clk_create(struct device *dev);
30extern void pm_clk_destroy(struct device *dev); 32extern void pm_clk_destroy(struct device *dev);
31extern int pm_clk_add(struct device *dev, const char *con_id); 33extern int pm_clk_add(struct device *dev, const char *con_id);
34extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
32extern void pm_clk_remove(struct device *dev, const char *con_id); 35extern void pm_clk_remove(struct device *dev, const char *con_id);
33extern int pm_clk_suspend(struct device *dev); 36extern int pm_clk_suspend(struct device *dev);
34extern int pm_clk_resume(struct device *dev); 37extern int pm_clk_resume(struct device *dev);
@@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id)
51{ 54{
52 return -EINVAL; 55 return -EINVAL;
53} 56}
57
58static inline int pm_clk_add_clk(struct device *dev, struct clk *clk)
59{
60 return -EINVAL;
61}
54static inline void pm_clk_remove(struct device *dev, const char *con_id) 62static inline void pm_clk_remove(struct device *dev, const char *con_id)
55{ 63{
56} 64}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 73e938b7e937..6cd20d5e651b 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -17,6 +17,9 @@
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/cpuidle.h> 18#include <linux/cpuidle.h>
19 19
20/* Defines used for the flags field in the struct generic_pm_domain */
21#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
22
20enum gpd_status { 23enum gpd_status {
21 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 24 GPD_STATE_ACTIVE = 0, /* PM domain is active */
22 GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ 25 GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
@@ -72,8 +75,11 @@ struct generic_pm_domain {
72 bool max_off_time_changed; 75 bool max_off_time_changed;
73 bool cached_power_down_ok; 76 bool cached_power_down_ok;
74 struct gpd_cpuidle_data *cpuidle_data; 77 struct gpd_cpuidle_data *cpuidle_data;
75 void (*attach_dev)(struct device *dev); 78 int (*attach_dev)(struct generic_pm_domain *domain,
76 void (*detach_dev)(struct device *dev); 79 struct device *dev);
80 void (*detach_dev)(struct generic_pm_domain *domain,
81 struct device *dev);
82 unsigned int flags; /* Bit field of configs for genpd */
77}; 83};
78 84
79static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) 85static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -98,13 +104,18 @@ struct gpd_timing_data {
98 bool cached_stop_ok; 104 bool cached_stop_ok;
99}; 105};
100 106
107struct pm_domain_data {
108 struct list_head list_node;
109 struct device *dev;
110};
111
101struct generic_pm_domain_data { 112struct generic_pm_domain_data {
102 struct pm_domain_data base; 113 struct pm_domain_data base;
103 struct gpd_timing_data td; 114 struct gpd_timing_data td;
104 struct notifier_block nb; 115 struct notifier_block nb;
105 struct mutex lock; 116 struct mutex lock;
106 unsigned int refcount; 117 unsigned int refcount;
107 bool need_restore; 118 int need_restore;
108}; 119};
109 120
110#ifdef CONFIG_PM_GENERIC_DOMAINS 121#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -145,6 +156,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd,
145 156
146extern int pm_genpd_poweron(struct generic_pm_domain *genpd); 157extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
147extern int pm_genpd_name_poweron(const char *domain_name); 158extern int pm_genpd_name_poweron(const char *domain_name);
159extern void pm_genpd_poweroff_unused(void);
148 160
149extern struct dev_power_governor simple_qos_governor; 161extern struct dev_power_governor simple_qos_governor;
150extern struct dev_power_governor pm_domain_always_on_gov; 162extern struct dev_power_governor pm_domain_always_on_gov;
@@ -219,6 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
219{ 231{
220 return -ENOSYS; 232 return -ENOSYS;
221} 233}
234static inline void pm_genpd_poweroff_unused(void) {}
222#define simple_qos_governor NULL 235#define simple_qos_governor NULL
223#define pm_domain_always_on_gov NULL 236#define pm_domain_always_on_gov NULL
224#endif 237#endif
@@ -235,12 +248,6 @@ static inline int pm_genpd_name_add_device(const char *domain_name,
235 return __pm_genpd_name_add_device(domain_name, dev, NULL); 248 return __pm_genpd_name_add_device(domain_name, dev, NULL);
236} 249}
237 250
238#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
239extern void pm_genpd_poweroff_unused(void);
240#else
241static inline void pm_genpd_poweroff_unused(void) {}
242#endif
243
244#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP 251#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
245extern void pm_genpd_syscore_poweroff(struct device *dev); 252extern void pm_genpd_syscore_poweroff(struct device *dev);
246extern void pm_genpd_syscore_poweron(struct device *dev); 253extern void pm_genpd_syscore_poweron(struct device *dev);
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0330217abfad..cec2d4540914 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -21,7 +21,7 @@ struct dev_pm_opp;
21struct device; 21struct device;
22 22
23enum dev_pm_opp_event { 23enum dev_pm_opp_event {
24 OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, 24 OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
25}; 25};
26 26
27#if defined(CONFIG_PM_OPP) 27#if defined(CONFIG_PM_OPP)
@@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
44 44
45int dev_pm_opp_add(struct device *dev, unsigned long freq, 45int dev_pm_opp_add(struct device *dev, unsigned long freq,
46 unsigned long u_volt); 46 unsigned long u_volt);
47void dev_pm_opp_remove(struct device *dev, unsigned long freq);
47 48
48int dev_pm_opp_enable(struct device *dev, unsigned long freq); 49int dev_pm_opp_enable(struct device *dev, unsigned long freq);
49 50
@@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
90 return -EINVAL; 91 return -EINVAL;
91} 92}
92 93
94static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
95{
96}
97
93static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) 98static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
94{ 99{
95 return 0; 100 return 0;
@@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
109 114
110#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 115#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
111int of_init_opp_table(struct device *dev); 116int of_init_opp_table(struct device *dev);
117void of_free_opp_table(struct device *dev);
112#else 118#else
113static inline int of_init_opp_table(struct device *dev) 119static inline int of_init_opp_table(struct device *dev)
114{ 120{
115 return -EINVAL; 121 return -EINVAL;
116} 122}
123
124static inline void of_free_opp_table(struct device *dev)
125{
126}
117#endif 127#endif
118 128
119#endif /* __LINUX_OPP_H__ */ 129#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 9ab4bf7c4646..7b3ae0cffc05 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -15,6 +15,7 @@ enum {
15 PM_QOS_CPU_DMA_LATENCY, 15 PM_QOS_CPU_DMA_LATENCY,
16 PM_QOS_NETWORK_LATENCY, 16 PM_QOS_NETWORK_LATENCY,
17 PM_QOS_NETWORK_THROUGHPUT, 17 PM_QOS_NETWORK_THROUGHPUT,
18 PM_QOS_MEMORY_BANDWIDTH,
18 19
19 /* insert new class ID */ 20 /* insert new class ID */
20 PM_QOS_NUM_CLASSES, 21 PM_QOS_NUM_CLASSES,
@@ -32,6 +33,7 @@ enum pm_qos_flags_status {
32#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) 33#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
33#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) 34#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
34#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 35#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
36#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
35#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 37#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
36#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 38#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
37#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) 39#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
@@ -69,7 +71,8 @@ struct dev_pm_qos_request {
69enum pm_qos_type { 71enum pm_qos_type {
70 PM_QOS_UNITIALIZED, 72 PM_QOS_UNITIALIZED,
71 PM_QOS_MAX, /* return the largest value */ 73 PM_QOS_MAX, /* return the largest value */
72 PM_QOS_MIN /* return the smallest value */ 74 PM_QOS_MIN, /* return the smallest value */
75 PM_QOS_SUM /* return the sum */
73}; 76};
74 77
75/* 78/*
@@ -151,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev);
151int dev_pm_qos_add_ancestor_request(struct device *dev, 154int dev_pm_qos_add_ancestor_request(struct device *dev,
152 struct dev_pm_qos_request *req, 155 struct dev_pm_qos_request *req,
153 enum dev_pm_qos_req_type type, s32 value); 156 enum dev_pm_qos_req_type type, s32 value);
157int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
158void dev_pm_qos_hide_latency_limit(struct device *dev);
159int dev_pm_qos_expose_flags(struct device *dev, s32 value);
160void dev_pm_qos_hide_flags(struct device *dev);
161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
164
165static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
166{
167 return dev->power.qos->resume_latency_req->data.pnode.prio;
168}
169
170static inline s32 dev_pm_qos_requested_flags(struct device *dev)
171{
172 return dev->power.qos->flags_req->data.flr.flags;
173}
154#else 174#else
155static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, 175static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
156 s32 mask) 176 s32 mask)
@@ -197,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
197 enum dev_pm_qos_req_type type, 217 enum dev_pm_qos_req_type type,
198 s32 value) 218 s32 value)
199 { return 0; } 219 { return 0; }
200#endif
201
202#ifdef CONFIG_PM_RUNTIME
203int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
204void dev_pm_qos_hide_latency_limit(struct device *dev);
205int dev_pm_qos_expose_flags(struct device *dev, s32 value);
206void dev_pm_qos_hide_flags(struct device *dev);
207int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
208s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
209int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
210
211static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
212{
213 return dev->power.qos->resume_latency_req->data.pnode.prio;
214}
215
216static inline s32 dev_pm_qos_requested_flags(struct device *dev)
217{
218 return dev->power.qos->flags_req->data.flr.flags;
219}
220#else
221static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) 220static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
222 { return 0; } 221 { return 0; }
223static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} 222static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 367f49b9a1c9..30e84d48bfea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev);
35extern int pm_generic_runtime_resume(struct device *dev); 35extern int pm_generic_runtime_resume(struct device *dev);
36extern int pm_runtime_force_suspend(struct device *dev); 36extern int pm_runtime_force_suspend(struct device *dev);
37extern int pm_runtime_force_resume(struct device *dev); 37extern int pm_runtime_force_resume(struct device *dev);
38#else
39static inline bool queue_pm_work(struct work_struct *work) { return false; }
40
41static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
42static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
43static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
44static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
45#endif
46
47#ifdef CONFIG_PM_RUNTIME
48 38
49extern int __pm_runtime_idle(struct device *dev, int rpmflags); 39extern int __pm_runtime_idle(struct device *dev, int rpmflags);
50extern int __pm_runtime_suspend(struct device *dev, int rpmflags); 40extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
@@ -128,7 +118,19 @@ static inline void pm_runtime_mark_last_busy(struct device *dev)
128 ACCESS_ONCE(dev->power.last_busy) = jiffies; 118 ACCESS_ONCE(dev->power.last_busy) = jiffies;
129} 119}
130 120
131#else /* !CONFIG_PM_RUNTIME */ 121static inline bool pm_runtime_is_irq_safe(struct device *dev)
122{
123 return dev->power.irq_safe;
124}
125
126#else /* !CONFIG_PM */
127
128static inline bool queue_pm_work(struct work_struct *work) { return false; }
129
130static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
131static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
132static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
133static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
132 134
133static inline int __pm_runtime_idle(struct device *dev, int rpmflags) 135static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
134{ 136{
@@ -167,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; }
167 169
168static inline void pm_runtime_no_callbacks(struct device *dev) {} 170static inline void pm_runtime_no_callbacks(struct device *dev) {}
169static inline void pm_runtime_irq_safe(struct device *dev) {} 171static inline void pm_runtime_irq_safe(struct device *dev) {}
172static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
170 173
171static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } 174static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
172static inline void pm_runtime_mark_last_busy(struct device *dev) {} 175static inline void pm_runtime_mark_last_busy(struct device *dev) {}
@@ -179,7 +182,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
179static inline void pm_runtime_set_memalloc_noio(struct device *dev, 182static inline void pm_runtime_set_memalloc_noio(struct device *dev,
180 bool enable){} 183 bool enable){}
181 184
182#endif /* !CONFIG_PM_RUNTIME */ 185#endif /* !CONFIG_PM */
183 186
184static inline int pm_runtime_idle(struct device *dev) 187static inline int pm_runtime_idle(struct device *dev)
185{ 188{
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
index fe25876c1a5d..17d7d0d20eca 100644
--- a/include/linux/pnfs_osd_xdr.h
+++ b/include/linux/pnfs_osd_xdr.h
@@ -5,7 +5,7 @@
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Benny Halevy <bhalevy@panasas.com> 7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com> 8 * Boaz Harrosh <ooo@electrozaur.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 11 * it under the terms of the GNU General Public License version 2
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 07e7945a1ff2..e97fc656a058 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -253,9 +253,6 @@ struct charger_manager {
253 struct device *dev; 253 struct device *dev;
254 struct charger_desc *desc; 254 struct charger_desc *desc;
255 255
256 struct power_supply *fuel_gauge;
257 struct power_supply **charger_stat;
258
259#ifdef CONFIG_THERMAL 256#ifdef CONFIG_THERMAL
260 struct thermal_zone_device *tzd_batt; 257 struct thermal_zone_device *tzd_batt;
261#endif 258#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 3ed049673022..096dbced02ac 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -200,6 +200,12 @@ struct power_supply {
200 void (*external_power_changed)(struct power_supply *psy); 200 void (*external_power_changed)(struct power_supply *psy);
201 void (*set_charged)(struct power_supply *psy); 201 void (*set_charged)(struct power_supply *psy);
202 202
203 /*
204 * Set if thermal zone should not be created for this power supply.
205 * For example for virtual supplies forwarding calls to actual
206 * sensors or other supplies.
207 */
208 bool no_thermal;
203 /* For APM emulation, think legacy userspace. */ 209 /* For APM emulation, think legacy userspace. */
204 int use_for_apm; 210 int use_for_apm;
205 211
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c69be9ee8f48..c8f170324e64 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -118,7 +118,6 @@ int no_printk(const char *fmt, ...)
118#ifdef CONFIG_EARLY_PRINTK 118#ifdef CONFIG_EARLY_PRINTK
119extern asmlinkage __printf(1, 2) 119extern asmlinkage __printf(1, 2)
120void early_printk(const char *fmt, ...); 120void early_printk(const char *fmt, ...);
121void early_vprintk(const char *fmt, va_list ap);
122#else 121#else
123static inline __printf(1, 2) __cold 122static inline __printf(1, 2) __cold
124void early_printk(const char *s, ...) { } 123void early_printk(const char *s, ...) { }
diff --git a/include/linux/property.h b/include/linux/property.h
new file mode 100644
index 000000000000..a6a3d98bd7e9
--- /dev/null
+++ b/include/linux/property.h
@@ -0,0 +1,143 @@
1/*
2 * property.h - Unified device property interface.
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef _LINUX_PROPERTY_H_
14#define _LINUX_PROPERTY_H_
15
16#include <linux/types.h>
17
18struct device;
19
20enum dev_prop_type {
21 DEV_PROP_U8,
22 DEV_PROP_U16,
23 DEV_PROP_U32,
24 DEV_PROP_U64,
25 DEV_PROP_STRING,
26 DEV_PROP_MAX,
27};
28
29bool device_property_present(struct device *dev, const char *propname);
30int device_property_read_u8_array(struct device *dev, const char *propname,
31 u8 *val, size_t nval);
32int device_property_read_u16_array(struct device *dev, const char *propname,
33 u16 *val, size_t nval);
34int device_property_read_u32_array(struct device *dev, const char *propname,
35 u32 *val, size_t nval);
36int device_property_read_u64_array(struct device *dev, const char *propname,
37 u64 *val, size_t nval);
38int device_property_read_string_array(struct device *dev, const char *propname,
39 const char **val, size_t nval);
40int device_property_read_string(struct device *dev, const char *propname,
41 const char **val);
42
43enum fwnode_type {
44 FWNODE_INVALID = 0,
45 FWNODE_OF,
46 FWNODE_ACPI,
47};
48
49struct fwnode_handle {
50 enum fwnode_type type;
51};
52
53bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
54int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
55 const char *propname, u8 *val,
56 size_t nval);
57int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
58 const char *propname, u16 *val,
59 size_t nval);
60int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
61 const char *propname, u32 *val,
62 size_t nval);
63int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
64 const char *propname, u64 *val,
65 size_t nval);
66int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
67 const char *propname, const char **val,
68 size_t nval);
69int fwnode_property_read_string(struct fwnode_handle *fwnode,
70 const char *propname, const char **val);
71
72struct fwnode_handle *device_get_next_child_node(struct device *dev,
73 struct fwnode_handle *child);
74
75#define device_for_each_child_node(dev, child) \
76 for (child = device_get_next_child_node(dev, NULL); child; \
77 child = device_get_next_child_node(dev, child))
78
79void fwnode_handle_put(struct fwnode_handle *fwnode);
80
81unsigned int device_get_child_node_count(struct device *dev);
82
83static inline bool device_property_read_bool(struct device *dev,
84 const char *propname)
85{
86 return device_property_present(dev, propname);
87}
88
89static inline int device_property_read_u8(struct device *dev,
90 const char *propname, u8 *val)
91{
92 return device_property_read_u8_array(dev, propname, val, 1);
93}
94
95static inline int device_property_read_u16(struct device *dev,
96 const char *propname, u16 *val)
97{
98 return device_property_read_u16_array(dev, propname, val, 1);
99}
100
101static inline int device_property_read_u32(struct device *dev,
102 const char *propname, u32 *val)
103{
104 return device_property_read_u32_array(dev, propname, val, 1);
105}
106
107static inline int device_property_read_u64(struct device *dev,
108 const char *propname, u64 *val)
109{
110 return device_property_read_u64_array(dev, propname, val, 1);
111}
112
113static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
114 const char *propname)
115{
116 return fwnode_property_present(fwnode, propname);
117}
118
119static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
120 const char *propname, u8 *val)
121{
122 return fwnode_property_read_u8_array(fwnode, propname, val, 1);
123}
124
125static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
126 const char *propname, u16 *val)
127{
128 return fwnode_property_read_u16_array(fwnode, propname, val, 1);
129}
130
131static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
132 const char *propname, u32 *val)
133{
134 return fwnode_property_read_u32_array(fwnode, propname, val, 1);
135}
136
137static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
138 const char *propname, u64 *val)
139{
140 return fwnode_property_read_u64_array(fwnode, propname, val, 1);
141}
142
143#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9974975d40db..4af3fdc85b01 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
53}; 53};
54 54
55struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, 55struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
56 u32 sig, struct persistent_ram_ecc_info *ecc_info); 56 u32 sig, struct persistent_ram_ecc_info *ecc_info,
57 unsigned int memtype);
57void persistent_ram_free(struct persistent_ram_zone *prz); 58void persistent_ram_free(struct persistent_ram_zone *prz);
58void persistent_ram_zap(struct persistent_ram_zone *prz); 59void persistent_ram_zap(struct persistent_ram_zone *prz);
59 60
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
76struct ramoops_platform_data { 77struct ramoops_platform_data {
77 unsigned long mem_size; 78 unsigned long mem_size;
78 unsigned long mem_address; 79 unsigned long mem_address;
80 unsigned int mem_type;
79 unsigned long record_size; 81 unsigned long record_size;
80 unsigned long console_size; 82 unsigned long console_size;
81 unsigned long ftrace_size; 83 unsigned long ftrace_size;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index cc79eff4a1ad..987a73a40ef8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code);
52extern void __ptrace_link(struct task_struct *child, 52extern void __ptrace_link(struct task_struct *child,
53 struct task_struct *new_parent); 53 struct task_struct *new_parent);
54extern void __ptrace_unlink(struct task_struct *child); 54extern void __ptrace_unlink(struct task_struct *child);
55extern void exit_ptrace(struct task_struct *tracer); 55extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
56#define PTRACE_MODE_READ 0x01 56#define PTRACE_MODE_READ 0x01
57#define PTRACE_MODE_ATTACH 0x02 57#define PTRACE_MODE_ATTACH 0x02
58#define PTRACE_MODE_NOAUDIT 0x04 58#define PTRACE_MODE_NOAUDIT 0x04
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
index 18d75e795606..e1ab6e86cdb3 100644
--- a/include/linux/pxa168_eth.h
+++ b/include/linux/pxa168_eth.h
@@ -4,6 +4,8 @@
4#ifndef __LINUX_PXA168_ETH_H 4#ifndef __LINUX_PXA168_ETH_H
5#define __LINUX_PXA168_ETH_H 5#define __LINUX_PXA168_ETH_H
6 6
7#include <linux/phy.h>
8
7struct pxa168_eth_platform_data { 9struct pxa168_eth_platform_data {
8 int port_number; 10 int port_number;
9 int phy_addr; 11 int phy_addr;
@@ -13,6 +15,7 @@ struct pxa168_eth_platform_data {
13 */ 15 */
14 int speed; /* 0, SPEED_10, SPEED_100 */ 16 int speed; /* 0, SPEED_10, SPEED_100 */
15 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ 17 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
18 phy_interface_t intf;
16 19
17 /* 20 /*
18 * Override default RX/TX queue sizes if nonzero. 21 * Override default RX/TX queue sizes if nonzero.
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index f2b405116166..77aed9ea1d26 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -108,6 +108,25 @@
108#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ 108#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
109#endif 109#endif
110 110
111/* QUARK_X1000 SSCR0 bit definition */
112#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
113#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
114#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
115#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
116
117#define RX_THRESH_QUARK_X1000_DFLT 1
118#define TX_THRESH_QUARK_X1000_DFLT 16
119
120#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
121#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
122
123#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
124#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
125#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
126#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
127#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
128#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
129
111/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ 130/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
112#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ 131#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
113#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ 132#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@@ -175,6 +194,7 @@ enum pxa_ssp_type {
175 PXA910_SSP, 194 PXA910_SSP,
176 CE4100_SSP, 195 CE4100_SSP,
177 LPSS_SSP, 196 LPSS_SSP,
197 QUARK_X1000_SSP,
178}; 198};
179 199
180struct ssp_device { 200struct ssp_device {
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 80d345a3524c..50978b781a19 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -56,6 +56,11 @@ enum quota_type {
56 PRJQUOTA = 2, /* element used for project quotas */ 56 PRJQUOTA = 2, /* element used for project quotas */
57}; 57};
58 58
59/* Masks for quota types when used as a bitmask */
60#define QTYPE_MASK_USR (1 << USRQUOTA)
61#define QTYPE_MASK_GRP (1 << GRPQUOTA)
62#define QTYPE_MASK_PRJ (1 << PRJQUOTA)
63
59typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ 64typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
60typedef long long qsize_t; /* Type in which we store sizes */ 65typedef long long qsize_t; /* Type in which we store sizes */
61 66
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 1d3eee594cd6..f23538a6e411 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot);
64int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); 64int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
65void __dquot_free_space(struct inode *inode, qsize_t number, int flags); 65void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
66 66
67int dquot_alloc_inode(const struct inode *inode); 67int dquot_alloc_inode(struct inode *inode);
68 68
69int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); 69int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
70void dquot_free_inode(const struct inode *inode); 70void dquot_free_inode(struct inode *inode);
71void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); 71void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
72 72
73int dquot_disable(struct super_block *sb, int type, unsigned int flags); 73int dquot_disable(struct super_block *sb, int type, unsigned int flags);
@@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode)
213{ 213{
214} 214}
215 215
216static inline int dquot_alloc_inode(const struct inode *inode) 216static inline int dquot_alloc_inode(struct inode *inode)
217{ 217{
218 return 0; 218 return 0;
219} 219}
220 220
221static inline void dquot_free_inode(const struct inode *inode) 221static inline void dquot_free_inode(struct inode *inode)
222{ 222{
223} 223}
224 224
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 0a260d8a18bf..18102529254e 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -17,14 +17,20 @@ struct ratelimit_state {
17 unsigned long begin; 17 unsigned long begin;
18}; 18};
19 19
20#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ 20#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
21 \
22 struct ratelimit_state name = { \
23 .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ 21 .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
24 .interval = interval_init, \ 22 .interval = interval_init, \
25 .burst = burst_init, \ 23 .burst = burst_init, \
26 } 24 }
27 25
26#define RATELIMIT_STATE_INIT_DISABLED \
27 RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
28
29#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
30 \
31 struct ratelimit_state name = \
32 RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
33
28static inline void ratelimit_state_init(struct ratelimit_state *rs, 34static inline void ratelimit_state_init(struct ratelimit_state *rs,
29 int interval, int burst) 35 int interval, int burst)
30{ 36{
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 372ad5e0dcb8..529bc946f450 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
241 * list_entry_rcu - get the struct for this entry 241 * list_entry_rcu - get the struct for this entry
242 * @ptr: the &struct list_head pointer. 242 * @ptr: the &struct list_head pointer.
243 * @type: the type of the struct this is embedded in. 243 * @type: the type of the struct this is embedded in.
244 * @member: the name of the list_struct within the struct. 244 * @member: the name of the list_head within the struct.
245 * 245 *
246 * This primitive may safely run concurrently with the _rcu list-mutation 246 * This primitive may safely run concurrently with the _rcu list-mutation
247 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 247 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
@@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
278 * list_first_or_null_rcu - get the first element from a list 278 * list_first_or_null_rcu - get the first element from a list
279 * @ptr: the list head to take the element from. 279 * @ptr: the list head to take the element from.
280 * @type: the type of the struct this is embedded in. 280 * @type: the type of the struct this is embedded in.
281 * @member: the name of the list_struct within the struct. 281 * @member: the name of the list_head within the struct.
282 * 282 *
283 * Note that if the list is empty, it returns NULL. 283 * Note that if the list is empty, it returns NULL.
284 * 284 *
@@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
296 * list_for_each_entry_rcu - iterate over rcu list of given type 296 * list_for_each_entry_rcu - iterate over rcu list of given type
297 * @pos: the type * to use as a loop cursor. 297 * @pos: the type * to use as a loop cursor.
298 * @head: the head for your list. 298 * @head: the head for your list.
299 * @member: the name of the list_struct within the struct. 299 * @member: the name of the list_head within the struct.
300 * 300 *
301 * This list-traversal primitive may safely run concurrently with 301 * This list-traversal primitive may safely run concurrently with
302 * the _rcu list-mutation primitives such as list_add_rcu() 302 * the _rcu list-mutation primitives such as list_add_rcu()
@@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
311 * list_for_each_entry_continue_rcu - continue iteration over list of given type 311 * list_for_each_entry_continue_rcu - continue iteration over list of given type
312 * @pos: the type * to use as a loop cursor. 312 * @pos: the type * to use as a loop cursor.
313 * @head: the head for your list. 313 * @head: the head for your list.
314 * @member: the name of the list_struct within the struct. 314 * @member: the name of the list_head within the struct.
315 * 315 *
316 * Continue to iterate over list of given type, continuing after 316 * Continue to iterate over list of given type, continuing after
317 * the current position. 317 * the current position.
@@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
543 typeof(*(pos)), member)) 543 typeof(*(pos)), member))
544 544
545/**
546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
547 * @pos: the type * to use as a loop cursor.
548 * @member: the name of the hlist_node within the struct.
549 */
550#define hlist_for_each_entry_from_rcu(pos, member) \
551 for (; pos; \
552 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
553 typeof(*(pos)), member))
545 554
546#endif /* __KERNEL__ */ 555#endif /* __KERNEL__ */
547#endif 556#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a4a819ffb2d1..ed4f5939a452 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -57,7 +57,7 @@ enum rcutorture_type {
57 INVALID_RCU_FLAVOR 57 INVALID_RCU_FLAVOR
58}; 58};
59 59
60#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 60#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
61void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 61void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
62 unsigned long *gpnum, unsigned long *completed); 62 unsigned long *gpnum, unsigned long *completed);
63void rcutorture_record_test_transition(void); 63void rcutorture_record_test_transition(void);
@@ -260,7 +260,7 @@ static inline int rcu_preempt_depth(void)
260void rcu_init(void); 260void rcu_init(void);
261void rcu_sched_qs(void); 261void rcu_sched_qs(void);
262void rcu_bh_qs(void); 262void rcu_bh_qs(void);
263void rcu_check_callbacks(int cpu, int user); 263void rcu_check_callbacks(int user);
264struct notifier_block; 264struct notifier_block;
265void rcu_idle_enter(void); 265void rcu_idle_enter(void);
266void rcu_idle_exit(void); 266void rcu_idle_exit(void);
@@ -348,8 +348,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
348 */ 348 */
349#define cond_resched_rcu_qs() \ 349#define cond_resched_rcu_qs() \
350do { \ 350do { \
351 rcu_note_voluntary_context_switch(current); \ 351 if (!cond_resched()) \
352 cond_resched(); \ 352 rcu_note_voluntary_context_switch(current); \
353} while (0) 353} while (0)
354 354
355#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) 355#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
@@ -365,7 +365,7 @@ typedef void call_rcu_func_t(struct rcu_head *head,
365 void (*func)(struct rcu_head *head)); 365 void (*func)(struct rcu_head *head));
366void wait_rcu_gp(call_rcu_func_t crf); 366void wait_rcu_gp(call_rcu_func_t crf);
367 367
368#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 368#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
369#include <linux/rcutree.h> 369#include <linux/rcutree.h>
370#elif defined(CONFIG_TINY_RCU) 370#elif defined(CONFIG_TINY_RCU)
371#include <linux/rcutiny.h> 371#include <linux/rcutiny.h>
@@ -617,6 +617,21 @@ static inline void rcu_preempt_sleep_check(void)
617#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) 617#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
618 618
619/** 619/**
620 * lockless_dereference() - safely load a pointer for later dereference
621 * @p: The pointer to load
622 *
623 * Similar to rcu_dereference(), but for situations where the pointed-to
624 * object's lifetime is managed by something other than RCU. That
625 * "something other" might be reference counting or simple immortality.
626 */
627#define lockless_dereference(p) \
628({ \
629 typeof(p) _________p1 = ACCESS_ONCE(p); \
630 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
631 (_________p1); \
632})
633
634/**
620 * rcu_assign_pointer() - assign to RCU-protected pointer 635 * rcu_assign_pointer() - assign to RCU-protected pointer
621 * @p: pointer to assign to 636 * @p: pointer to assign to
622 * @v: value to assign (publish) 637 * @v: value to assign (publish)
@@ -852,7 +867,7 @@ static inline void rcu_preempt_sleep_check(void)
852 * 867 *
853 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), 868 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
854 * it is illegal to block while in an RCU read-side critical section. 869 * it is illegal to block while in an RCU read-side critical section.
855 * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT 870 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
856 * kernel builds, RCU read-side critical sections may be preempted, 871 * kernel builds, RCU read-side critical sections may be preempted,
857 * but explicit blocking is illegal. Finally, in preemptible RCU 872 * but explicit blocking is illegal. Finally, in preemptible RCU
858 * implementations in real-time (with -rt patchset) kernel builds, RCU 873 * implementations in real-time (with -rt patchset) kernel builds, RCU
@@ -887,7 +902,9 @@ static inline void rcu_read_lock(void)
887 * Unfortunately, this function acquires the scheduler's runqueue and 902 * Unfortunately, this function acquires the scheduler's runqueue and
888 * priority-inheritance spinlocks. This means that deadlock could result 903 * priority-inheritance spinlocks. This means that deadlock could result
889 * if the caller of rcu_read_unlock() already holds one of these locks or 904 * if the caller of rcu_read_unlock() already holds one of these locks or
890 * any lock that is ever acquired while holding them. 905 * any lock that is ever acquired while holding them; or any lock which
906 * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
907 * does not disable irqs while taking ->wait_lock.
891 * 908 *
892 * That said, RCU readers are never priority boosted unless they were 909 * That said, RCU readers are never priority boosted unless they were
893 * preempted. Therefore, one way to avoid deadlock is to make sure 910 * preempted. Therefore, one way to avoid deadlock is to make sure
@@ -1047,6 +1064,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1047 */ 1064 */
1048#define RCU_INIT_POINTER(p, v) \ 1065#define RCU_INIT_POINTER(p, v) \
1049 do { \ 1066 do { \
1067 rcu_dereference_sparse(p, __rcu); \
1050 p = RCU_INITIALIZER(v); \ 1068 p = RCU_INITIALIZER(v); \
1051 } while (0) 1069 } while (0)
1052 1070
@@ -1103,7 +1121,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1103 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) 1121 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
1104 1122
1105#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) 1123#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
1106static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) 1124static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
1107{ 1125{
1108 *delta_jiffies = ULONG_MAX; 1126 *delta_jiffies = ULONG_MAX;
1109 return 0; 1127 return 0;
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 38cc5b1e252d..0e5366200154 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -78,7 +78,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
78 call_rcu(head, func); 78 call_rcu(head, func);
79} 79}
80 80
81static inline void rcu_note_context_switch(int cpu) 81static inline void rcu_note_context_switch(void)
82{ 82{
83 rcu_sched_qs(); 83 rcu_sched_qs();
84} 84}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 3e2f5d432743..52953790dcca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,9 +30,9 @@
30#ifndef __LINUX_RCUTREE_H 30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33void rcu_note_context_switch(int cpu); 33void rcu_note_context_switch(void);
34#ifndef CONFIG_RCU_NOCB_CPU_ALL 34#ifndef CONFIG_RCU_NOCB_CPU_ALL
35int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); 35int rcu_needs_cpu(unsigned long *delta_jiffies);
36#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ 36#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
37void rcu_cpu_stall_reset(void); 37void rcu_cpu_stall_reset(void);
38 38
@@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void);
43 */ 43 */
44static inline void rcu_virt_note_context_switch(int cpu) 44static inline void rcu_virt_note_context_switch(int cpu)
45{ 45{
46 rcu_note_context_switch(cpu); 46 rcu_note_context_switch();
47} 47}
48 48
49void synchronize_rcu_bh(void); 49void synchronize_rcu_bh(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index c5ed83f49c4e..4419b99d8d6e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -27,6 +27,7 @@ struct spmi_device;
27struct regmap; 27struct regmap;
28struct regmap_range_cfg; 28struct regmap_range_cfg;
29struct regmap_field; 29struct regmap_field;
30struct snd_ac97;
30 31
31/* An enum of all the supported cache types */ 32/* An enum of all the supported cache types */
32enum regcache_type { 33enum regcache_type {
@@ -340,6 +341,8 @@ struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
340struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, 341struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
341 void __iomem *regs, 342 void __iomem *regs,
342 const struct regmap_config *config); 343 const struct regmap_config *config);
344struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
345 const struct regmap_config *config);
343 346
344struct regmap *devm_regmap_init(struct device *dev, 347struct regmap *devm_regmap_init(struct device *dev,
345 const struct regmap_bus *bus, 348 const struct regmap_bus *bus,
@@ -356,6 +359,10 @@ struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
356struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, 359struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
357 void __iomem *regs, 360 void __iomem *regs,
358 const struct regmap_config *config); 361 const struct regmap_config *config);
362struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
363 const struct regmap_config *config);
364
365bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
359 366
360/** 367/**
361 * regmap_init_mmio(): Initialise register map 368 * regmap_init_mmio(): Initialise register map
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index d347c805f923..d17e1ff7ad01 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
35#ifndef __LINUX_REGULATOR_CONSUMER_H_ 35#ifndef __LINUX_REGULATOR_CONSUMER_H_
36#define __LINUX_REGULATOR_CONSUMER_H_ 36#define __LINUX_REGULATOR_CONSUMER_H_
37 37
38#include <linux/err.h>
39
38struct device; 40struct device;
39struct notifier_block; 41struct notifier_block;
40struct regmap; 42struct regmap;
@@ -99,6 +101,8 @@ struct regmap;
99 * Data passed is "struct pre_voltage_change_data" 101 * Data passed is "struct pre_voltage_change_data"
100 * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. 102 * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
101 * Data passed is old voltage cast to (void *). 103 * Data passed is old voltage cast to (void *).
104 * PRE_DISABLE Regulator is about to be disabled
105 * ABORT_DISABLE Regulator disable failed for some reason
102 * 106 *
103 * NOTE: These events can be OR'ed together when passed into handler. 107 * NOTE: These events can be OR'ed together when passed into handler.
104 */ 108 */
@@ -113,6 +117,8 @@ struct regmap;
113#define REGULATOR_EVENT_DISABLE 0x80 117#define REGULATOR_EVENT_DISABLE 0x80
114#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 118#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
115#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 119#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
120#define REGULATOR_EVENT_PRE_DISABLE 0x400
121#define REGULATOR_EVENT_ABORT_DISABLE 0x800
116 122
117/** 123/**
118 * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event 124 * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
@@ -282,7 +288,7 @@ devm_regulator_get(struct device *dev, const char *id)
282static inline struct regulator *__must_check 288static inline struct regulator *__must_check
283regulator_get_exclusive(struct device *dev, const char *id) 289regulator_get_exclusive(struct device *dev, const char *id)
284{ 290{
285 return NULL; 291 return ERR_PTR(-ENODEV);
286} 292}
287 293
288static inline struct regulator *__must_check 294static inline struct regulator *__must_check
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fc0ee0ce8325..5f1e9ca47417 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -243,6 +243,8 @@ enum regulator_type {
243 * 243 *
244 * @enable_time: Time taken for initial enable of regulator (in uS). 244 * @enable_time: Time taken for initial enable of regulator (in uS).
245 * @off_on_delay: guard time (in uS), before re-enabling a regulator 245 * @off_on_delay: guard time (in uS), before re-enabling a regulator
246 *
247 * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode
246 */ 248 */
247struct regulator_desc { 249struct regulator_desc {
248 const char *name; 250 const char *name;
@@ -285,6 +287,8 @@ struct regulator_desc {
285 unsigned int enable_time; 287 unsigned int enable_time;
286 288
287 unsigned int off_on_delay; 289 unsigned int off_on_delay;
290
291 unsigned int (*of_map_mode)(unsigned int mode);
288}; 292};
289 293
290/** 294/**
@@ -301,6 +305,9 @@ struct regulator_desc {
301 * NULL). 305 * NULL).
302 * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is 306 * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is
303 * insufficient. 307 * insufficient.
308 * @ena_gpio_initialized: GPIO controlling regulator enable was properly
309 * initialized, meaning that >= 0 is a valid gpio
310 * identifier and < 0 is a non existent gpio.
304 * @ena_gpio: GPIO controlling regulator enable. 311 * @ena_gpio: GPIO controlling regulator enable.
305 * @ena_gpio_invert: Sense for GPIO enable control. 312 * @ena_gpio_invert: Sense for GPIO enable control.
306 * @ena_gpio_flags: Flags to use when calling gpio_request_one() 313 * @ena_gpio_flags: Flags to use when calling gpio_request_one()
@@ -312,6 +319,7 @@ struct regulator_config {
312 struct device_node *of_node; 319 struct device_node *of_node;
313 struct regmap *regmap; 320 struct regmap *regmap;
314 321
322 bool ena_gpio_initialized;
315 int ena_gpio; 323 int ena_gpio;
316 unsigned int ena_gpio_invert:1; 324 unsigned int ena_gpio_invert:1;
317 unsigned int ena_gpio_flags; 325 unsigned int ena_gpio_flags;
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
index f9217965aaa3..763953f7e3b8 100644
--- a/include/linux/regulator/of_regulator.h
+++ b/include/linux/regulator/of_regulator.h
@@ -6,24 +6,29 @@
6#ifndef __LINUX_OF_REG_H 6#ifndef __LINUX_OF_REG_H
7#define __LINUX_OF_REG_H 7#define __LINUX_OF_REG_H
8 8
9struct regulator_desc;
10
9struct of_regulator_match { 11struct of_regulator_match {
10 const char *name; 12 const char *name;
11 void *driver_data; 13 void *driver_data;
12 struct regulator_init_data *init_data; 14 struct regulator_init_data *init_data;
13 struct device_node *of_node; 15 struct device_node *of_node;
16 const struct regulator_desc *desc;
14}; 17};
15 18
16#if defined(CONFIG_OF) 19#if defined(CONFIG_OF)
17extern struct regulator_init_data 20extern struct regulator_init_data
18 *of_get_regulator_init_data(struct device *dev, 21 *of_get_regulator_init_data(struct device *dev,
19 struct device_node *node); 22 struct device_node *node,
23 const struct regulator_desc *desc);
20extern int of_regulator_match(struct device *dev, struct device_node *node, 24extern int of_regulator_match(struct device *dev, struct device_node *node,
21 struct of_regulator_match *matches, 25 struct of_regulator_match *matches,
22 unsigned int num_matches); 26 unsigned int num_matches);
23#else 27#else
24static inline struct regulator_init_data 28static inline struct regulator_init_data
25 *of_get_regulator_init_data(struct device *dev, 29 *of_get_regulator_init_data(struct device *dev,
26 struct device_node *node) 30 struct device_node *node,
31 const struct regulator_desc *desc)
27{ 32{
28 return NULL; 33 return NULL;
29} 34}
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
deleted file mode 100644
index 56b7bc32db4f..000000000000
--- a/include/linux/res_counter.h
+++ /dev/null
@@ -1,223 +0,0 @@
1#ifndef __RES_COUNTER_H__
2#define __RES_COUNTER_H__
3
4/*
5 * Resource Counters
6 * Contain common data types and routines for resource accounting
7 *
8 * Copyright 2007 OpenVZ SWsoft Inc
9 *
10 * Author: Pavel Emelianov <xemul@openvz.org>
11 *
12 * See Documentation/cgroups/resource_counter.txt for more
13 * info about what this counter is.
14 */
15
16#include <linux/spinlock.h>
17#include <linux/errno.h>
18
19/*
20 * The core object. the cgroup that wishes to account for some
21 * resource may include this counter into its structures and use
22 * the helpers described beyond
23 */
24
25struct res_counter {
26 /*
27 * the current resource consumption level
28 */
29 unsigned long long usage;
30 /*
31 * the maximal value of the usage from the counter creation
32 */
33 unsigned long long max_usage;
34 /*
35 * the limit that usage cannot exceed
36 */
37 unsigned long long limit;
38 /*
39 * the limit that usage can be exceed
40 */
41 unsigned long long soft_limit;
42 /*
43 * the number of unsuccessful attempts to consume the resource
44 */
45 unsigned long long failcnt;
46 /*
47 * the lock to protect all of the above.
48 * the routines below consider this to be IRQ-safe
49 */
50 spinlock_t lock;
51 /*
52 * Parent counter, used for hierarchial resource accounting
53 */
54 struct res_counter *parent;
55};
56
57#define RES_COUNTER_MAX ULLONG_MAX
58
59/**
60 * Helpers to interact with userspace
61 * res_counter_read_u64() - returns the value of the specified member.
62 * res_counter_read/_write - put/get the specified fields from the
63 * res_counter struct to/from the user
64 *
65 * @counter: the counter in question
66 * @member: the field to work with (see RES_xxx below)
67 * @buf: the buffer to opeate on,...
68 * @nbytes: its size...
69 * @pos: and the offset.
70 */
71
72u64 res_counter_read_u64(struct res_counter *counter, int member);
73
74ssize_t res_counter_read(struct res_counter *counter, int member,
75 const char __user *buf, size_t nbytes, loff_t *pos,
76 int (*read_strategy)(unsigned long long val, char *s));
77
78int res_counter_memparse_write_strategy(const char *buf,
79 unsigned long long *res);
80
81/*
82 * the field descriptors. one for each member of res_counter
83 */
84
85enum {
86 RES_USAGE,
87 RES_MAX_USAGE,
88 RES_LIMIT,
89 RES_FAILCNT,
90 RES_SOFT_LIMIT,
91};
92
93/*
94 * helpers for accounting
95 */
96
97void res_counter_init(struct res_counter *counter, struct res_counter *parent);
98
99/*
100 * charge - try to consume more resource.
101 *
102 * @counter: the counter
103 * @val: the amount of the resource. each controller defines its own
104 * units, e.g. numbers, bytes, Kbytes, etc
105 *
106 * returns 0 on success and <0 if the counter->usage will exceed the
107 * counter->limit
108 *
109 * charge_nofail works the same, except that it charges the resource
110 * counter unconditionally, and returns < 0 if the after the current
111 * charge we are over limit.
112 */
113
114int __must_check res_counter_charge(struct res_counter *counter,
115 unsigned long val, struct res_counter **limit_fail_at);
116int res_counter_charge_nofail(struct res_counter *counter,
117 unsigned long val, struct res_counter **limit_fail_at);
118
119/*
120 * uncharge - tell that some portion of the resource is released
121 *
122 * @counter: the counter
123 * @val: the amount of the resource
124 *
125 * these calls check for usage underflow and show a warning on the console
126 *
127 * returns the total charges still present in @counter.
128 */
129
130u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
131
132u64 res_counter_uncharge_until(struct res_counter *counter,
133 struct res_counter *top,
134 unsigned long val);
135/**
136 * res_counter_margin - calculate chargeable space of a counter
137 * @cnt: the counter
138 *
139 * Returns the difference between the hard limit and the current usage
140 * of resource counter @cnt.
141 */
142static inline unsigned long long res_counter_margin(struct res_counter *cnt)
143{
144 unsigned long long margin;
145 unsigned long flags;
146
147 spin_lock_irqsave(&cnt->lock, flags);
148 if (cnt->limit > cnt->usage)
149 margin = cnt->limit - cnt->usage;
150 else
151 margin = 0;
152 spin_unlock_irqrestore(&cnt->lock, flags);
153 return margin;
154}
155
156/**
157 * Get the difference between the usage and the soft limit
158 * @cnt: The counter
159 *
160 * Returns 0 if usage is less than or equal to soft limit
161 * The difference between usage and soft limit, otherwise.
162 */
163static inline unsigned long long
164res_counter_soft_limit_excess(struct res_counter *cnt)
165{
166 unsigned long long excess;
167 unsigned long flags;
168
169 spin_lock_irqsave(&cnt->lock, flags);
170 if (cnt->usage <= cnt->soft_limit)
171 excess = 0;
172 else
173 excess = cnt->usage - cnt->soft_limit;
174 spin_unlock_irqrestore(&cnt->lock, flags);
175 return excess;
176}
177
178static inline void res_counter_reset_max(struct res_counter *cnt)
179{
180 unsigned long flags;
181
182 spin_lock_irqsave(&cnt->lock, flags);
183 cnt->max_usage = cnt->usage;
184 spin_unlock_irqrestore(&cnt->lock, flags);
185}
186
187static inline void res_counter_reset_failcnt(struct res_counter *cnt)
188{
189 unsigned long flags;
190
191 spin_lock_irqsave(&cnt->lock, flags);
192 cnt->failcnt = 0;
193 spin_unlock_irqrestore(&cnt->lock, flags);
194}
195
196static inline int res_counter_set_limit(struct res_counter *cnt,
197 unsigned long long limit)
198{
199 unsigned long flags;
200 int ret = -EBUSY;
201
202 spin_lock_irqsave(&cnt->lock, flags);
203 if (cnt->usage <= limit) {
204 cnt->limit = limit;
205 ret = 0;
206 }
207 spin_unlock_irqrestore(&cnt->lock, flags);
208 return ret;
209}
210
211static inline int
212res_counter_set_soft_limit(struct res_counter *cnt,
213 unsigned long long soft_limit)
214{
215 unsigned long flags;
216
217 spin_lock_irqsave(&cnt->lock, flags);
218 cnt->soft_limit = soft_limit;
219 spin_unlock_irqrestore(&cnt->lock, flags);
220 return 0;
221}
222
223#endif
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index 41a4695fde08..ce6b962ffed4 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -12,11 +12,13 @@ struct reset_controller_dev;
12 * things to reset the device 12 * things to reset the device
13 * @assert: manually assert the reset line, if supported 13 * @assert: manually assert the reset line, if supported
14 * @deassert: manually deassert the reset line, if supported 14 * @deassert: manually deassert the reset line, if supported
15 * @status: return the status of the reset line, if supported
15 */ 16 */
16struct reset_control_ops { 17struct reset_control_ops {
17 int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); 18 int (*reset)(struct reset_controller_dev *rcdev, unsigned long id);
18 int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); 19 int (*assert)(struct reset_controller_dev *rcdev, unsigned long id);
19 int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); 20 int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id);
21 int (*status)(struct reset_controller_dev *rcdev, unsigned long id);
20}; 22};
21 23
22struct module; 24struct module;
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 349f150ae12c..da5602bd77d7 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -10,6 +10,7 @@ struct reset_control;
10int reset_control_reset(struct reset_control *rstc); 10int reset_control_reset(struct reset_control *rstc);
11int reset_control_assert(struct reset_control *rstc); 11int reset_control_assert(struct reset_control *rstc);
12int reset_control_deassert(struct reset_control *rstc); 12int reset_control_deassert(struct reset_control *rstc);
13int reset_control_status(struct reset_control *rstc);
13 14
14struct reset_control *reset_control_get(struct device *dev, const char *id); 15struct reset_control *reset_control_get(struct device *dev, const char *id);
15void reset_control_put(struct reset_control *rstc); 16void reset_control_put(struct reset_control *rstc);
@@ -57,6 +58,12 @@ static inline int reset_control_deassert(struct reset_control *rstc)
57 return 0; 58 return 0;
58} 59}
59 60
61static inline int reset_control_status(struct reset_control *rstc)
62{
63 WARN_ON(1);
64 return 0;
65}
66
60static inline void reset_control_put(struct reset_control *rstc) 67static inline void reset_control_put(struct reset_control *rstc)
61{ 68{
62 WARN_ON(1); 69 WARN_ON(1);
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index fb298e9d6d3a..b93fd89b2e5e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -65,7 +65,10 @@ struct rhashtable_params {
65 size_t new_size); 65 size_t new_size);
66 bool (*shrink_decision)(const struct rhashtable *ht, 66 bool (*shrink_decision)(const struct rhashtable *ht,
67 size_t new_size); 67 size_t new_size);
68 int (*mutex_is_held)(void); 68#ifdef CONFIG_PROVE_LOCKING
69 int (*mutex_is_held)(void *parent);
70 void *parent;
71#endif
69}; 72};
70 73
71/** 74/**
@@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
96u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); 99u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
97u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); 100u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
98 101
99void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); 102void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
100bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); 103bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
101void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, 104void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
102 struct rhash_head __rcu **pprev, gfp_t flags); 105 struct rhash_head __rcu **pprev);
103 106
104bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); 107bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
105bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); 108bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
106 109
107int rhashtable_expand(struct rhashtable *ht, gfp_t flags); 110int rhashtable_expand(struct rhashtable *ht);
108int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); 111int rhashtable_shrink(struct rhashtable *ht);
109 112
110void *rhashtable_lookup(const struct rhashtable *ht, const void *key); 113void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
111void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, 114void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 49a4d6f59108..e2c13cd863bd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
97 __ring_buffer_alloc((size), (flags), &__key); \ 97 __ring_buffer_alloc((size), (flags), &__key); \
98}) 98})
99 99
100int ring_buffer_wait(struct ring_buffer *buffer, int cpu); 100int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, 101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
102 struct file *filp, poll_table *poll_table); 102 struct file *filp, poll_table *poll_table);
103 103
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index c2c28975293c..6d6be09a2fe5 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -19,11 +19,28 @@
19extern int rtc_month_days(unsigned int month, unsigned int year); 19extern int rtc_month_days(unsigned int month, unsigned int year);
20extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); 20extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year);
21extern int rtc_valid_tm(struct rtc_time *tm); 21extern int rtc_valid_tm(struct rtc_time *tm);
22extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); 22extern time64_t rtc_tm_to_time64(struct rtc_time *tm);
23extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); 23extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
24ktime_t rtc_tm_to_ktime(struct rtc_time tm); 24ktime_t rtc_tm_to_ktime(struct rtc_time tm);
25struct rtc_time rtc_ktime_to_tm(ktime_t kt); 25struct rtc_time rtc_ktime_to_tm(ktime_t kt);
26 26
27/**
28 * Deprecated. Use rtc_time64_to_tm().
29 */
30static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
31{
32 rtc_time64_to_tm(time, tm);
33}
34
35/**
36 * Deprecated. Use rtc_tm_to_time64().
37 */
38static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
39{
40 *time = rtc_tm_to_time64(tm);
41
42 return 0;
43}
27 44
28#include <linux/device.h> 45#include <linux/device.h>
29#include <linux/seq_file.h> 46#include <linux/seq_file.h>
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 6cacbce1a06c..5db76a32fcab 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
17 u32 id, long expires, u32 error); 17 u32 id, long expires, u32 error);
18 18
19void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); 19void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
20struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
21 unsigned change, gfp_t flags);
22void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
23 gfp_t flags);
24
20 25
21/* RTNL is used as a global lock for all changes to network configuration */ 26/* RTNL is used as a global lock for all changes to network configuration */
22extern void rtnl_lock(void); 27extern void rtnl_lock(void);
@@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
94 struct nlattr *tb[], 99 struct nlattr *tb[],
95 struct net_device *dev, 100 struct net_device *dev,
96 const unsigned char *addr, 101 const unsigned char *addr,
97 u16 flags); 102 u16 vid,
103 u16 flags);
98extern int ndo_dflt_fdb_del(struct ndmsg *ndm, 104extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
99 struct nlattr *tb[], 105 struct nlattr *tb[],
100 struct net_device *dev, 106 struct net_device *dev,
101 const unsigned char *addr); 107 const unsigned char *addr,
108 u16 vid);
102 109
103extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 110extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
104 struct net_device *dev, u16 mode); 111 struct net_device *dev, u16 mode,
112 u32 flags, u32 mask);
105#endif /* __LINUX_RTNETLINK_H */ 113#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e344bbe63ec..8db31ef98d2f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -243,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!(
243 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ 243 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
244 (task->flags & PF_FROZEN) == 0) 244 (task->flags & PF_FROZEN) == 0)
245 245
246#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
247
248#define __set_task_state(tsk, state_value) \
249 do { \
250 (tsk)->task_state_change = _THIS_IP_; \
251 (tsk)->state = (state_value); \
252 } while (0)
253#define set_task_state(tsk, state_value) \
254 do { \
255 (tsk)->task_state_change = _THIS_IP_; \
256 set_mb((tsk)->state, (state_value)); \
257 } while (0)
258
259/*
260 * set_current_state() includes a barrier so that the write of current->state
261 * is correctly serialised wrt the caller's subsequent test of whether to
262 * actually sleep:
263 *
264 * set_current_state(TASK_UNINTERRUPTIBLE);
265 * if (do_i_need_to_sleep())
266 * schedule();
267 *
268 * If the caller does not need such serialisation then use __set_current_state()
269 */
270#define __set_current_state(state_value) \
271 do { \
272 current->task_state_change = _THIS_IP_; \
273 current->state = (state_value); \
274 } while (0)
275#define set_current_state(state_value) \
276 do { \
277 current->task_state_change = _THIS_IP_; \
278 set_mb(current->state, (state_value)); \
279 } while (0)
280
281#else
282
246#define __set_task_state(tsk, state_value) \ 283#define __set_task_state(tsk, state_value) \
247 do { (tsk)->state = (state_value); } while (0) 284 do { (tsk)->state = (state_value); } while (0)
248#define set_task_state(tsk, state_value) \ 285#define set_task_state(tsk, state_value) \
@@ -259,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!(
259 * 296 *
260 * If the caller does not need such serialisation then use __set_current_state() 297 * If the caller does not need such serialisation then use __set_current_state()
261 */ 298 */
262#define __set_current_state(state_value) \ 299#define __set_current_state(state_value) \
263 do { current->state = (state_value); } while (0) 300 do { current->state = (state_value); } while (0)
264#define set_current_state(state_value) \ 301#define set_current_state(state_value) \
265 set_mb(current->state, (state_value)) 302 set_mb(current->state, (state_value))
266 303
304#endif
305
267/* Task command name length */ 306/* Task command name length */
268#define TASK_COMM_LEN 16 307#define TASK_COMM_LEN 16
269 308
@@ -1278,9 +1317,9 @@ struct task_struct {
1278 union rcu_special rcu_read_unlock_special; 1317 union rcu_special rcu_read_unlock_special;
1279 struct list_head rcu_node_entry; 1318 struct list_head rcu_node_entry;
1280#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1319#endif /* #ifdef CONFIG_PREEMPT_RCU */
1281#ifdef CONFIG_TREE_PREEMPT_RCU 1320#ifdef CONFIG_PREEMPT_RCU
1282 struct rcu_node *rcu_blocked_node; 1321 struct rcu_node *rcu_blocked_node;
1283#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1322#endif /* #ifdef CONFIG_PREEMPT_RCU */
1284#ifdef CONFIG_TASKS_RCU 1323#ifdef CONFIG_TASKS_RCU
1285 unsigned long rcu_tasks_nvcsw; 1324 unsigned long rcu_tasks_nvcsw;
1286 bool rcu_tasks_holdout; 1325 bool rcu_tasks_holdout;
@@ -1325,6 +1364,10 @@ struct task_struct {
1325 unsigned sched_reset_on_fork:1; 1364 unsigned sched_reset_on_fork:1;
1326 unsigned sched_contributes_to_load:1; 1365 unsigned sched_contributes_to_load:1;
1327 1366
1367#ifdef CONFIG_MEMCG_KMEM
1368 unsigned memcg_kmem_skip_account:1;
1369#endif
1370
1328 unsigned long atomic_flags; /* Flags needing atomic access. */ 1371 unsigned long atomic_flags; /* Flags needing atomic access. */
1329 1372
1330 pid_t pid; 1373 pid_t pid;
@@ -1558,28 +1601,23 @@ struct task_struct {
1558 struct numa_group *numa_group; 1601 struct numa_group *numa_group;
1559 1602
1560 /* 1603 /*
1561 * Exponential decaying average of faults on a per-node basis. 1604 * numa_faults is an array split into four regions:
1562 * Scheduling placement decisions are made based on the these counts. 1605 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1563 * The values remain static for the duration of a PTE scan 1606 * in this precise order.
1607 *
1608 * faults_memory: Exponential decaying average of faults on a per-node
1609 * basis. Scheduling placement decisions are made based on these
1610 * counts. The values remain static for the duration of a PTE scan.
1611 * faults_cpu: Track the nodes the process was running on when a NUMA
1612 * hinting fault was incurred.
1613 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1614 * during the current scan window. When the scan completes, the counts
1615 * in faults_memory and faults_cpu decay and these values are copied.
1564 */ 1616 */
1565 unsigned long *numa_faults_memory; 1617 unsigned long *numa_faults;
1566 unsigned long total_numa_faults; 1618 unsigned long total_numa_faults;
1567 1619
1568 /* 1620 /*
1569 * numa_faults_buffer records faults per node during the current
1570 * scan window. When the scan completes, the counts in
1571 * numa_faults_memory decay and these values are copied.
1572 */
1573 unsigned long *numa_faults_buffer_memory;
1574
1575 /*
1576 * Track the nodes the process was running on when a NUMA hinting
1577 * fault was incurred.
1578 */
1579 unsigned long *numa_faults_cpu;
1580 unsigned long *numa_faults_buffer_cpu;
1581
1582 /*
1583 * numa_faults_locality tracks if faults recorded during the last 1621 * numa_faults_locality tracks if faults recorded during the last
1584 * scan window were remote/local. The task scan period is adapted 1622 * scan window were remote/local. The task scan period is adapted
1585 * based on the locality of the faults with different weights 1623 * based on the locality of the faults with different weights
@@ -1645,8 +1683,7 @@ struct task_struct {
1645 /* bitmask and counter of trace recursion */ 1683 /* bitmask and counter of trace recursion */
1646 unsigned long trace_recursion; 1684 unsigned long trace_recursion;
1647#endif /* CONFIG_TRACING */ 1685#endif /* CONFIG_TRACING */
1648#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ 1686#ifdef CONFIG_MEMCG
1649 unsigned int memcg_kmem_skip_account;
1650 struct memcg_oom_info { 1687 struct memcg_oom_info {
1651 struct mem_cgroup *memcg; 1688 struct mem_cgroup *memcg;
1652 gfp_t gfp_mask; 1689 gfp_t gfp_mask;
@@ -1661,6 +1698,9 @@ struct task_struct {
1661 unsigned int sequential_io; 1698 unsigned int sequential_io;
1662 unsigned int sequential_io_avg; 1699 unsigned int sequential_io_avg;
1663#endif 1700#endif
1701#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1702 unsigned long task_state_change;
1703#endif
1664}; 1704};
1665 1705
1666/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1706/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2052,6 +2092,10 @@ static inline void tsk_restore_flags(struct task_struct *task,
2052 task->flags |= orig_flags & flags; 2092 task->flags |= orig_flags & flags;
2053} 2093}
2054 2094
2095extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2096 const struct cpumask *trial);
2097extern int task_can_attach(struct task_struct *p,
2098 const struct cpumask *cs_cpus_allowed);
2055#ifdef CONFIG_SMP 2099#ifdef CONFIG_SMP
2056extern void do_set_cpus_allowed(struct task_struct *p, 2100extern void do_set_cpus_allowed(struct task_struct *p,
2057 const struct cpumask *new_mask); 2101 const struct cpumask *new_mask);
@@ -2441,6 +2485,10 @@ extern void do_group_exit(int);
2441extern int do_execve(struct filename *, 2485extern int do_execve(struct filename *,
2442 const char __user * const __user *, 2486 const char __user * const __user *,
2443 const char __user * const __user *); 2487 const char __user * const __user *);
2488extern int do_execveat(int, struct filename *,
2489 const char __user * const __user *,
2490 const char __user * const __user *,
2491 int);
2444extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 2492extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2445struct task_struct *fork_idle(int); 2493struct task_struct *fork_idle(int);
2446extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2494extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
@@ -2760,7 +2808,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
2760extern int _cond_resched(void); 2808extern int _cond_resched(void);
2761 2809
2762#define cond_resched() ({ \ 2810#define cond_resched() ({ \
2763 __might_sleep(__FILE__, __LINE__, 0); \ 2811 ___might_sleep(__FILE__, __LINE__, 0); \
2764 _cond_resched(); \ 2812 _cond_resched(); \
2765}) 2813})
2766 2814
@@ -2773,14 +2821,14 @@ extern int __cond_resched_lock(spinlock_t *lock);
2773#endif 2821#endif
2774 2822
2775#define cond_resched_lock(lock) ({ \ 2823#define cond_resched_lock(lock) ({ \
2776 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ 2824 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2777 __cond_resched_lock(lock); \ 2825 __cond_resched_lock(lock); \
2778}) 2826})
2779 2827
2780extern int __cond_resched_softirq(void); 2828extern int __cond_resched_softirq(void);
2781 2829
2782#define cond_resched_softirq() ({ \ 2830#define cond_resched_softirq() ({ \
2783 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ 2831 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2784 __cond_resched_softirq(); \ 2832 __cond_resched_softirq(); \
2785}) 2833})
2786 2834
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 52e0097f61f0..cf6a9daaaf6d 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -43,6 +43,21 @@ struct seq_operations {
43#define SEQ_SKIP 1 43#define SEQ_SKIP 1
44 44
45/** 45/**
46 * seq_has_overflowed - check if the buffer has overflowed
47 * @m: the seq_file handle
48 *
49 * seq_files have a buffer which may overflow. When this happens a larger
50 * buffer is reallocated and all the data will be printed again.
51 * The overflow state is true when m->count == m->size.
52 *
53 * Returns true if the buffer received more than it can hold.
54 */
55static inline bool seq_has_overflowed(struct seq_file *m)
56{
57 return m->count == m->size;
58}
59
60/**
46 * seq_get_buf - get buffer to write arbitrary data to 61 * seq_get_buf - get buffer to write arbitrary data to
47 * @m: the seq_file handle 62 * @m: the seq_file handle
48 * @bufp: the beginning of the buffer is stored here 63 * @bufp: the beginning of the buffer is stored here
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 68c097077ef0..f4aee75f00b1 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -18,8 +18,6 @@ struct shrink_control {
18 */ 18 */
19 unsigned long nr_to_scan; 19 unsigned long nr_to_scan;
20 20
21 /* shrink from these nodes */
22 nodemask_t nodes_to_scan;
23 /* current node being shrunk (for NUMA aware shrinkers) */ 21 /* current node being shrunk (for NUMA aware shrinkers) */
24 int nid; 22 int nid;
25}; 23};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a59d9343c25b..85ab7d72b54c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -20,6 +20,8 @@
20#include <linux/time.h> 20#include <linux/time.h>
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/cache.h> 22#include <linux/cache.h>
23#include <linux/rbtree.h>
24#include <linux/socket.h>
23 25
24#include <linux/atomic.h> 26#include <linux/atomic.h>
25#include <asm/types.h> 27#include <asm/types.h>
@@ -148,6 +150,8 @@
148struct net_device; 150struct net_device;
149struct scatterlist; 151struct scatterlist;
150struct pipe_inode_info; 152struct pipe_inode_info;
153struct iov_iter;
154struct napi_struct;
151 155
152#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 156#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
153struct nf_conntrack { 157struct nf_conntrack {
@@ -341,7 +345,6 @@ enum {
341 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ 345 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
342 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ 346 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
343 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ 347 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
344 SKB_FCLONE_FREE, /* this companion fclone skb is available */
345}; 348};
346 349
347enum { 350enum {
@@ -370,8 +373,7 @@ enum {
370 373
371 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, 374 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
372 375
373 SKB_GSO_MPLS = 1 << 12, 376 SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
374
375}; 377};
376 378
377#if BITS_PER_LONG > 32 379#if BITS_PER_LONG > 32
@@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
440 * @next: Next buffer in list 442 * @next: Next buffer in list
441 * @prev: Previous buffer in list 443 * @prev: Previous buffer in list
442 * @tstamp: Time we arrived/left 444 * @tstamp: Time we arrived/left
445 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
443 * @sk: Socket we are owned by 446 * @sk: Socket we are owned by
444 * @dev: Device we arrived on/are leaving by 447 * @dev: Device we arrived on/are leaving by
445 * @cb: Control buffer. Free for use by every layer. Put private vars here 448 * @cb: Control buffer. Free for use by every layer. Put private vars here
@@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
504 */ 507 */
505 508
506struct sk_buff { 509struct sk_buff {
507 /* These two members must be first. */
508 struct sk_buff *next;
509 struct sk_buff *prev;
510
511 union { 510 union {
512 ktime_t tstamp; 511 struct {
513 struct skb_mstamp skb_mstamp; 512 /* These two members must be first. */
513 struct sk_buff *next;
514 struct sk_buff *prev;
515
516 union {
517 ktime_t tstamp;
518 struct skb_mstamp skb_mstamp;
519 };
520 };
521 struct rb_node rbnode; /* used in netem & tcp stack */
514 }; 522 };
515
516 struct sock *sk; 523 struct sock *sk;
517 struct net_device *dev; 524 struct net_device *dev;
518 525
@@ -557,7 +564,9 @@ struct sk_buff {
557 /* fields enclosed in headers_start/headers_end are copied 564 /* fields enclosed in headers_start/headers_end are copied
558 * using a single memcpy() in __copy_skb_header() 565 * using a single memcpy() in __copy_skb_header()
559 */ 566 */
567 /* private: */
560 __u32 headers_start[0]; 568 __u32 headers_start[0];
569 /* public: */
561 570
562/* if you move pkt_type around you also must adapt those constants */ 571/* if you move pkt_type around you also must adapt those constants */
563#ifdef __BIG_ENDIAN_BITFIELD 572#ifdef __BIG_ENDIAN_BITFIELD
@@ -595,7 +604,8 @@ struct sk_buff {
595#endif 604#endif
596 __u8 ipvs_property:1; 605 __u8 ipvs_property:1;
597 __u8 inner_protocol_type:1; 606 __u8 inner_protocol_type:1;
598 /* 4 or 6 bit hole */ 607 __u8 remcsum_offload:1;
608 /* 3 or 5 bit hole */
599 609
600#ifdef CONFIG_NET_SCHED 610#ifdef CONFIG_NET_SCHED
601 __u16 tc_index; /* traffic control index */ 611 __u16 tc_index; /* traffic control index */
@@ -642,7 +652,9 @@ struct sk_buff {
642 __u16 network_header; 652 __u16 network_header;
643 __u16 mac_header; 653 __u16 mac_header;
644 654
655 /* private: */
645 __u32 headers_end[0]; 656 __u32 headers_end[0];
657 /* public: */
646 658
647 /* These elements must be at the end, see alloc_skb() for details. */ 659 /* These elements must be at the end, see alloc_skb() for details. */
648 sk_buff_data_t tail; 660 sk_buff_data_t tail;
@@ -662,6 +674,7 @@ struct sk_buff {
662 674
663#define SKB_ALLOC_FCLONE 0x01 675#define SKB_ALLOC_FCLONE 0x01
664#define SKB_ALLOC_RX 0x02 676#define SKB_ALLOC_RX 0x02
677#define SKB_ALLOC_NAPI 0x04
665 678
666/* Returns true if the skb was allocated from PFMEMALLOC reserves */ 679/* Returns true if the skb was allocated from PFMEMALLOC reserves */
667static inline bool skb_pfmemalloc(const struct sk_buff *skb) 680static inline bool skb_pfmemalloc(const struct sk_buff *skb)
@@ -706,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
706 skb->_skb_refdst = (unsigned long)dst; 719 skb->_skb_refdst = (unsigned long)dst;
707} 720}
708 721
709void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
710 bool force);
711
712/** 722/**
713 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference 723 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
714 * @skb: buffer 724 * @skb: buffer
@@ -721,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
721 */ 731 */
722static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) 732static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
723{ 733{
724 __skb_dst_set_noref(skb, dst, false); 734 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
725} 735 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
726
727/**
728 * skb_dst_set_noref_force - sets skb dst, without taking reference
729 * @skb: buffer
730 * @dst: dst entry
731 *
732 * Sets skb dst, assuming a reference was not taken on dst.
733 * No reference is taken and no dst_release will be called. While for
734 * cached dsts deferred reclaim is a basic feature, for entries that are
735 * not cached it is caller's job to guarantee that last dst_release for
736 * provided dst happens when nobody uses it, eg. after a RCU grace period.
737 */
738static inline void skb_dst_set_noref_force(struct sk_buff *skb,
739 struct dst_entry *dst)
740{
741 __skb_dst_set_noref(skb, dst, true);
742} 736}
743 737
744/** 738/**
@@ -795,15 +789,19 @@ struct sk_buff_fclones {
795 * @skb: buffer 789 * @skb: buffer
796 * 790 *
797 * Returns true is skb is a fast clone, and its clone is not freed. 791 * Returns true is skb is a fast clone, and its clone is not freed.
792 * Some drivers call skb_orphan() in their ndo_start_xmit(),
793 * so we also check that this didnt happen.
798 */ 794 */
799static inline bool skb_fclone_busy(const struct sk_buff *skb) 795static inline bool skb_fclone_busy(const struct sock *sk,
796 const struct sk_buff *skb)
800{ 797{
801 const struct sk_buff_fclones *fclones; 798 const struct sk_buff_fclones *fclones;
802 799
803 fclones = container_of(skb, struct sk_buff_fclones, skb1); 800 fclones = container_of(skb, struct sk_buff_fclones, skb1);
804 801
805 return skb->fclone == SKB_FCLONE_ORIG && 802 return skb->fclone == SKB_FCLONE_ORIG &&
806 fclones->skb2.fclone == SKB_FCLONE_CLONE; 803 atomic_read(&fclones->fclone_ref) > 1 &&
804 fclones->skb2.sk == sk;
807} 805}
808 806
809static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 807static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
@@ -2168,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2168 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); 2166 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2169} 2167}
2170 2168
2169void *napi_alloc_frag(unsigned int fragsz);
2170struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2171 unsigned int length, gfp_t gfp_mask);
2172static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2173 unsigned int length)
2174{
2175 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2176}
2177
2171/** 2178/**
2172 * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data 2179 * __dev_alloc_pages - allocate page for network Rx
2173 * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX 2180 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2174 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used 2181 * @order: size of the allocation
2175 * @order: size of the allocation
2176 * 2182 *
2177 * Allocate a new page. 2183 * Allocate a new page.
2178 * 2184 *
2179 * %NULL is returned if there is no free memory. 2185 * %NULL is returned if there is no free memory.
2180*/ 2186*/
2181static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, 2187static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2182 struct sk_buff *skb, 2188 unsigned int order)
2183 unsigned int order) 2189{
2184{ 2190 /* This piece of code contains several assumptions.
2185 struct page *page; 2191 * 1. This is for device Rx, therefor a cold page is preferred.
2186 2192 * 2. The expectation is the user wants a compound page.
2187 gfp_mask |= __GFP_COLD; 2193 * 3. If requesting a order 0 page it will not be compound
2188 2194 * due to the check to see if order has a value in prep_new_page
2189 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2195 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
2190 gfp_mask |= __GFP_MEMALLOC; 2196 * code in gfp_to_alloc_flags that should be enforcing this.
2197 */
2198 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
2191 2199
2192 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); 2200 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2193 if (skb && page && page->pfmemalloc) 2201}
2194 skb->pfmemalloc = true;
2195 2202
2196 return page; 2203static inline struct page *dev_alloc_pages(unsigned int order)
2204{
2205 return __dev_alloc_pages(GFP_ATOMIC, order);
2197} 2206}
2198 2207
2199/** 2208/**
2200 * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data 2209 * __dev_alloc_page - allocate a page for network Rx
2201 * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX 2210 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2202 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
2203 * 2211 *
2204 * Allocate a new page. 2212 * Allocate a new page.
2205 * 2213 *
2206 * %NULL is returned if there is no free memory. 2214 * %NULL is returned if there is no free memory.
2207 */ 2215 */
2208static inline struct page *__skb_alloc_page(gfp_t gfp_mask, 2216static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2209 struct sk_buff *skb) 2217{
2218 return __dev_alloc_pages(gfp_mask, 0);
2219}
2220
2221static inline struct page *dev_alloc_page(void)
2210{ 2222{
2211 return __skb_alloc_pages(gfp_mask, skb, 0); 2223 return __dev_alloc_page(GFP_ATOMIC);
2212} 2224}
2213 2225
2214/** 2226/**
@@ -2440,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2440 * is untouched. Otherwise it is extended. Returns zero on 2452 * is untouched. Otherwise it is extended. Returns zero on
2441 * success. The skb is freed on error. 2453 * success. The skb is freed on error.
2442 */ 2454 */
2443
2444static inline int skb_padto(struct sk_buff *skb, unsigned int len) 2455static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2445{ 2456{
2446 unsigned int size = skb->len; 2457 unsigned int size = skb->len;
@@ -2449,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2449 return skb_pad(skb, len - size); 2460 return skb_pad(skb, len - size);
2450} 2461}
2451 2462
2463/**
2464 * skb_put_padto - increase size and pad an skbuff up to a minimal size
2465 * @skb: buffer to pad
2466 * @len: minimal length
2467 *
2468 * Pads up a buffer to ensure the trailing bytes exist and are
2469 * blanked. If the buffer already contains sufficient data it
2470 * is untouched. Otherwise it is extended. Returns zero on
2471 * success. The skb is freed on error.
2472 */
2473static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2474{
2475 unsigned int size = skb->len;
2476
2477 if (unlikely(size < len)) {
2478 len -= size;
2479 if (skb_pad(skb, len))
2480 return -ENOMEM;
2481 __skb_put(skb, len);
2482 }
2483 return 0;
2484}
2485
2452static inline int skb_add_data(struct sk_buff *skb, 2486static inline int skb_add_data(struct sk_buff *skb,
2453 char __user *from, int copy) 2487 char __user *from, int copy)
2454{ 2488{
@@ -2621,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
2621 int *err); 2655 int *err);
2622unsigned int datagram_poll(struct file *file, struct socket *sock, 2656unsigned int datagram_poll(struct file *file, struct socket *sock,
2623 struct poll_table_struct *wait); 2657 struct poll_table_struct *wait);
2624int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, 2658int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
2625 struct iovec *to, int size); 2659 struct iov_iter *to, int size);
2626int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, 2660static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
2627 struct iovec *iov); 2661 struct msghdr *msg, int size)
2628int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, 2662{
2629 const struct iovec *from, int from_offset, 2663 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
2630 int len); 2664}
2631int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, 2665int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
2632 int offset, size_t count); 2666 struct msghdr *msg);
2633int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, 2667int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
2634 const struct iovec *to, int to_offset, 2668 struct iov_iter *from, int len);
2635 int size); 2669int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
2636void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2670void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2637void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); 2671void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
2638int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); 2672int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
@@ -2653,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2653unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); 2687unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2654struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 2688struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2655struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 2689struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
2690int skb_ensure_writable(struct sk_buff *skb, int write_len);
2691int skb_vlan_pop(struct sk_buff *skb);
2692int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
2693
2694static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
2695{
2696 /* XXX: stripping const */
2697 return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
2698}
2699
2700static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
2701{
2702 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
2703}
2656 2704
2657struct skb_checksum_ops { 2705struct skb_checksum_ops {
2658 __wsum (*update)(const void *mem, int len, __wsum wsum); 2706 __wsum (*update)(const void *mem, int len, __wsum wsum);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c265bec6a57d..9a139b637069 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -493,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
493 * @memcg: pointer to the memcg this cache belongs to 493 * @memcg: pointer to the memcg this cache belongs to
494 * @list: list_head for the list of all caches in this memcg 494 * @list: list_head for the list of all caches in this memcg
495 * @root_cache: pointer to the global, root cache, this cache was derived from 495 * @root_cache: pointer to the global, root cache, this cache was derived from
496 * @nr_pages: number of pages that belongs to this cache.
497 */ 496 */
498struct memcg_cache_params { 497struct memcg_cache_params {
499 bool is_root_cache; 498 bool is_root_cache;
@@ -506,17 +505,12 @@ struct memcg_cache_params {
506 struct mem_cgroup *memcg; 505 struct mem_cgroup *memcg;
507 struct list_head list; 506 struct list_head list;
508 struct kmem_cache *root_cache; 507 struct kmem_cache *root_cache;
509 atomic_t nr_pages;
510 }; 508 };
511 }; 509 };
512}; 510};
513 511
514int memcg_update_all_caches(int num_memcgs); 512int memcg_update_all_caches(int num_memcgs);
515 513
516struct seq_file;
517int cache_show(struct kmem_cache *s, struct seq_file *m);
518void print_slabinfo_header(struct seq_file *m);
519
520/** 514/**
521 * kmalloc_array - allocate memory for an array. 515 * kmalloc_array - allocate memory for an array.
522 * @n: number of elements. 516 * @n: number of elements.
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ec538fc287a6..6e49a14365dc 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -47,16 +47,25 @@ struct linger {
47struct msghdr { 47struct msghdr {
48 void *msg_name; /* ptr to socket address structure */ 48 void *msg_name; /* ptr to socket address structure */
49 int msg_namelen; /* size of socket address structure */ 49 int msg_namelen; /* size of socket address structure */
50 struct iovec *msg_iov; /* scatter/gather array */ 50 struct iov_iter msg_iter; /* data */
51 __kernel_size_t msg_iovlen; /* # elements in msg_iov */
52 void *msg_control; /* ancillary data */ 51 void *msg_control; /* ancillary data */
53 __kernel_size_t msg_controllen; /* ancillary data buffer length */ 52 __kernel_size_t msg_controllen; /* ancillary data buffer length */
54 unsigned int msg_flags; /* flags on received message */ 53 unsigned int msg_flags; /* flags on received message */
55}; 54};
55
56struct user_msghdr {
57 void __user *msg_name; /* ptr to socket address structure */
58 int msg_namelen; /* size of socket address structure */
59 struct iovec __user *msg_iov; /* scatter/gather array */
60 __kernel_size_t msg_iovlen; /* # elements in msg_iov */
61 void __user *msg_control; /* ancillary data */
62 __kernel_size_t msg_controllen; /* ancillary data buffer length */
63 unsigned int msg_flags; /* flags on received message */
64};
56 65
57/* For recvmmsg/sendmmsg */ 66/* For recvmmsg/sendmmsg */
58struct mmsghdr { 67struct mmsghdr {
59 struct msghdr msg_hdr; 68 struct user_msghdr msg_hdr;
60 unsigned int msg_len; 69 unsigned int msg_len;
61}; 70};
62 71
@@ -94,6 +103,10 @@ struct cmsghdr {
94 (cmsg)->cmsg_len <= (unsigned long) \ 103 (cmsg)->cmsg_len <= (unsigned long) \
95 ((mhdr)->msg_controllen - \ 104 ((mhdr)->msg_controllen - \
96 ((char *)(cmsg) - (char *)(mhdr)->msg_control))) 105 ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
106#define for_each_cmsghdr(cmsg, msg) \
107 for (cmsg = CMSG_FIRSTHDR(msg); \
108 cmsg; \
109 cmsg = CMSG_NXTHDR(msg, cmsg))
97 110
98/* 111/*
99 * Get the next cmsg header 112 * Get the next cmsg header
@@ -256,7 +269,7 @@ struct ucred {
256#define MSG_EOF MSG_FIN 269#define MSG_EOF MSG_FIN
257 270
258#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 271#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
259#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file 272#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
260 descriptor received through 273 descriptor received through
261 SCM_RIGHTS */ 274 SCM_RIGHTS */
262#if defined(CONFIG_COMPAT) 275#if defined(CONFIG_COMPAT)
@@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
312extern unsigned long iov_pages(const struct iovec *iov, int offset, 325extern unsigned long iov_pages(const struct iovec *iov, int offset,
313 unsigned long nr_segs); 326 unsigned long nr_segs);
314 327
315extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
316extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); 328extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
317extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); 329extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
318 330
319struct timespec; 331struct timespec;
320 332
321/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ 333/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
322extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); 334extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
323extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); 335extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
324extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 336extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
325 unsigned int flags, struct timespec *timeout); 337 unsigned int flags, struct timespec *timeout);
326extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, 338extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 46d188a9947c..a6ef2a8e6de4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi)
1049extern const struct spi_device_id * 1049extern const struct spi_device_id *
1050spi_get_device_id(const struct spi_device *sdev); 1050spi_get_device_id(const struct spi_device *sdev);
1051 1051
1052static inline bool
1053spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
1054{
1055 return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
1056}
1057
1052#endif /* __LINUX_SPI_H */ 1058#endif /* __LINUX_SPI_H */
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 115b570e3bff..669045ab73f3 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -1,6 +1,8 @@
1#ifndef __LINUX_STACKTRACE_H 1#ifndef __LINUX_STACKTRACE_H
2#define __LINUX_STACKTRACE_H 2#define __LINUX_STACKTRACE_H
3 3
4#include <linux/types.h>
5
4struct task_struct; 6struct task_struct;
5struct pt_regs; 7struct pt_regs;
6 8
@@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
20 struct stack_trace *trace); 22 struct stack_trace *trace);
21 23
22extern void print_stack_trace(struct stack_trace *trace, int spaces); 24extern void print_stack_trace(struct stack_trace *trace, int spaces);
25extern int snprint_stack_trace(char *buf, size_t size,
26 struct stack_trace *trace, int spaces);
23 27
24#ifdef CONFIG_USER_STACKTRACE_SUPPORT 28#ifdef CONFIG_USER_STACKTRACE_SUPPORT
25extern void save_stack_trace_user(struct stack_trace *trace); 29extern void save_stack_trace_user(struct stack_trace *trace);
@@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace);
32# define save_stack_trace_tsk(tsk, trace) do { } while (0) 36# define save_stack_trace_tsk(tsk, trace) do { } while (0)
33# define save_stack_trace_user(trace) do { } while (0) 37# define save_stack_trace_user(trace) do { } while (0)
34# define print_stack_trace(trace, spaces) do { } while (0) 38# define print_stack_trace(trace, spaces) do { } while (0)
39# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
35#endif 40#endif
36 41
37#endif 42#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index e6edfe51575a..2e22a2e58f3a 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
132#endif 132#endif
133 133
134extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 134extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
135 const void *from, size_t available); 135 const void *from, size_t available);
136 136
137/** 137/**
138 * strstarts - does @str start with @prefix? 138 * strstarts - does @str start with @prefix?
@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
144 return strncmp(str, prefix, strlen(prefix)) == 0; 144 return strncmp(str, prefix, strlen(prefix)) == 0;
145} 145}
146 146
147extern size_t memweight(const void *ptr, size_t bytes); 147size_t memweight(const void *ptr, size_t bytes);
148void memzero_explicit(void *s, size_t count);
148 149
149/** 150/**
150 * kbasename - return the last part of a pathname. 151 * kbasename - return the last part of a pathname.
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 8e030075fe79..a7cbb570cc5c 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -53,7 +53,7 @@ struct rpc_cred {
53 struct rcu_head cr_rcu; 53 struct rcu_head cr_rcu;
54 struct rpc_auth * cr_auth; 54 struct rpc_auth * cr_auth;
55 const struct rpc_credops *cr_ops; 55 const struct rpc_credops *cr_ops;
56#ifdef RPC_DEBUG 56#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
57 unsigned long cr_magic; /* 0x0f4aa4f0 */ 57 unsigned long cr_magic; /* 0x0f4aa4f0 */
58#endif 58#endif
59 unsigned long cr_expire; /* when to gc */ 59 unsigned long cr_expire; /* when to gc */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 70736b98c721..d86acc63b25f 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -63,6 +63,9 @@ struct rpc_clnt {
63 struct rpc_rtt cl_rtt_default; 63 struct rpc_rtt cl_rtt_default;
64 struct rpc_timeout cl_timeout_default; 64 struct rpc_timeout cl_timeout_default;
65 const struct rpc_program *cl_program; 65 const struct rpc_program *cl_program;
66#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
67 struct dentry *cl_debugfs; /* debugfs directory */
68#endif
66}; 69};
67 70
68/* 71/*
@@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
176const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); 179const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
177int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); 180int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
178 181
182const char *rpc_proc_name(const struct rpc_task *task);
179#endif /* __KERNEL__ */ 183#endif /* __KERNEL__ */
180#endif /* _LINUX_SUNRPC_CLNT_H */ 184#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 9385bd74c860..c57d8ea0716c 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -10,22 +10,10 @@
10 10
11#include <uapi/linux/sunrpc/debug.h> 11#include <uapi/linux/sunrpc/debug.h>
12 12
13
14/*
15 * Enable RPC debugging/profiling.
16 */
17#ifdef CONFIG_SUNRPC_DEBUG
18#define RPC_DEBUG
19#endif
20#ifdef CONFIG_TRACEPOINTS
21#define RPC_TRACEPOINTS
22#endif
23/* #define RPC_PROFILE */
24
25/* 13/*
26 * Debugging macros etc 14 * Debugging macros etc
27 */ 15 */
28#ifdef RPC_DEBUG 16#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
29extern unsigned int rpc_debug; 17extern unsigned int rpc_debug;
30extern unsigned int nfs_debug; 18extern unsigned int nfs_debug;
31extern unsigned int nfsd_debug; 19extern unsigned int nfsd_debug;
@@ -36,7 +24,7 @@ extern unsigned int nlm_debug;
36#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) 24#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args)
37 25
38#undef ifdebug 26#undef ifdebug
39#ifdef RPC_DEBUG 27#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
40# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) 28# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac))
41 29
42# define dfprintk(fac, args...) \ 30# define dfprintk(fac, args...) \
@@ -65,9 +53,55 @@ extern unsigned int nlm_debug;
65/* 53/*
66 * Sysctl interface for RPC debugging 54 * Sysctl interface for RPC debugging
67 */ 55 */
68#ifdef RPC_DEBUG 56
57struct rpc_clnt;
58struct rpc_xprt;
59
60#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
69void rpc_register_sysctl(void); 61void rpc_register_sysctl(void);
70void rpc_unregister_sysctl(void); 62void rpc_unregister_sysctl(void);
63int sunrpc_debugfs_init(void);
64void sunrpc_debugfs_exit(void);
65int rpc_clnt_debugfs_register(struct rpc_clnt *);
66void rpc_clnt_debugfs_unregister(struct rpc_clnt *);
67int rpc_xprt_debugfs_register(struct rpc_xprt *);
68void rpc_xprt_debugfs_unregister(struct rpc_xprt *);
69#else
70static inline int
71sunrpc_debugfs_init(void)
72{
73 return 0;
74}
75
76static inline void
77sunrpc_debugfs_exit(void)
78{
79 return;
80}
81
82static inline int
83rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
84{
85 return 0;
86}
87
88static inline void
89rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
90{
91 return;
92}
93
94static inline int
95rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
96{
97 return 0;
98}
99
100static inline void
101rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt)
102{
103 return;
104}
71#endif 105#endif
72 106
73#endif /* _LINUX_SUNRPC_DEBUG_H_ */ 107#endif /* _LINUX_SUNRPC_DEBUG_H_ */
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 1565bbe86d51..eecb5a71e6c0 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -27,10 +27,13 @@
27 27
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/ktime.h> 29#include <linux/ktime.h>
30#include <linux/spinlock.h>
30 31
31#define RPC_IOSTATS_VERS "1.0" 32#define RPC_IOSTATS_VERS "1.0"
32 33
33struct rpc_iostats { 34struct rpc_iostats {
35 spinlock_t om_lock;
36
34 /* 37 /*
35 * These counters give an idea about how many request 38 * These counters give an idea about how many request
36 * transmissions are required, on average, to complete that 39 * transmissions are required, on average, to complete that
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 1a8959944c5f..5f1e6bd4c316 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -79,7 +79,7 @@ struct rpc_task {
79 unsigned short tk_flags; /* misc flags */ 79 unsigned short tk_flags; /* misc flags */
80 unsigned short tk_timeouts; /* maj timeouts */ 80 unsigned short tk_timeouts; /* maj timeouts */
81 81
82#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) 82#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
83 unsigned short tk_pid; /* debugging aid */ 83 unsigned short tk_pid; /* debugging aid */
84#endif 84#endif
85 unsigned char tk_priority : 2,/* Task priority */ 85 unsigned char tk_priority : 2,/* Task priority */
@@ -187,7 +187,7 @@ struct rpc_wait_queue {
187 unsigned char nr; /* # tasks remaining for cookie */ 187 unsigned char nr; /* # tasks remaining for cookie */
188 unsigned short qlen; /* total # tasks waiting in queue */ 188 unsigned short qlen; /* total # tasks waiting in queue */
189 struct rpc_timer timer_list; 189 struct rpc_timer timer_list;
190#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) 190#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
191 const char * name; 191 const char * name;
192#endif 192#endif
193}; 193};
@@ -237,7 +237,7 @@ void rpc_free(void *);
237int rpciod_up(void); 237int rpciod_up(void);
238void rpciod_down(void); 238void rpciod_down(void);
239int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); 239int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
240#ifdef RPC_DEBUG 240#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
241struct net; 241struct net;
242void rpc_show_tasks(struct net *); 242void rpc_show_tasks(struct net *);
243#endif 243#endif
@@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task)
251 return __rpc_wait_for_completion_task(task, NULL); 251 return __rpc_wait_for_completion_task(task, NULL);
252} 252}
253 253
254#if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) 254#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
255static inline const char * rpc_qname(const struct rpc_wait_queue *q) 255static inline const char * rpc_qname(const struct rpc_wait_queue *q)
256{ 256{
257 return ((q && q->name) ? q->name : "unknown"); 257 return ((q && q->name) ? q->name : "unknown");
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index cf391eef2e6d..9d27ac45b909 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -239,6 +239,9 @@ struct rpc_xprt {
239 struct net *xprt_net; 239 struct net *xprt_net;
240 const char *servername; 240 const char *servername;
241 const char *address_strings[RPC_DISPLAY_MAX]; 241 const char *address_strings[RPC_DISPLAY_MAX];
242#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
243 struct dentry *debugfs; /* debugfs directory */
244#endif
242}; 245};
243 246
244#if defined(CONFIG_SUNRPC_BACKCHANNEL) 247#if defined(CONFIG_SUNRPC_BACKCHANNEL)
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 1ad36cc25b2e..7591788e9fbf 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -17,6 +17,65 @@ void cleanup_socket_xprt(void);
17#define RPC_DEF_MIN_RESVPORT (665U) 17#define RPC_DEF_MIN_RESVPORT (665U)
18#define RPC_DEF_MAX_RESVPORT (1023U) 18#define RPC_DEF_MAX_RESVPORT (1023U)
19 19
20struct sock_xprt {
21 struct rpc_xprt xprt;
22
23 /*
24 * Network layer
25 */
26 struct socket * sock;
27 struct sock * inet;
28
29 /*
30 * State of TCP reply receive
31 */
32 __be32 tcp_fraghdr,
33 tcp_xid,
34 tcp_calldir;
35
36 u32 tcp_offset,
37 tcp_reclen;
38
39 unsigned long tcp_copied,
40 tcp_flags;
41
42 /*
43 * Connection of transports
44 */
45 struct delayed_work connect_worker;
46 struct sockaddr_storage srcaddr;
47 unsigned short srcport;
48
49 /*
50 * UDP socket buffer size parameters
51 */
52 size_t rcvsize,
53 sndsize;
54
55 /*
56 * Saved socket callback addresses
57 */
58 void (*old_data_ready)(struct sock *);
59 void (*old_state_change)(struct sock *);
60 void (*old_write_space)(struct sock *);
61 void (*old_error_report)(struct sock *);
62};
63
64/*
65 * TCP receive state flags
66 */
67#define TCP_RCV_LAST_FRAG (1UL << 0)
68#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
69#define TCP_RCV_COPY_XID (1UL << 2)
70#define TCP_RCV_COPY_DATA (1UL << 3)
71#define TCP_RCV_READ_CALLDIR (1UL << 4)
72#define TCP_RCV_COPY_CALLDIR (1UL << 5)
73
74/*
75 * TCP RPC flags
76 */
77#define TCP_RPC_REPLY (1UL << 6)
78
20#endif /* __KERNEL__ */ 79#endif /* __KERNEL__ */
21 80
22#endif /* _LINUX_SUNRPC_XPRTSOCK_H */ 81#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 37a585beef5c..34e8b60ab973 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -102,14 +102,6 @@ union swap_header {
102 } info; 102 } info;
103}; 103};
104 104
105 /* A swap entry has to fit into a "unsigned long", as
106 * the entry is hidden in the "index" field of the
107 * swapper address space.
108 */
109typedef struct {
110 unsigned long val;
111} swp_entry_t;
112
113/* 105/*
114 * current->reclaim_state points to one of these when a task is running 106 * current->reclaim_state points to one of these when a task is running
115 * memory reclaim 107 * memory reclaim
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
new file mode 100644
index 000000000000..145306bdc92f
--- /dev/null
+++ b/include/linux/swap_cgroup.h
@@ -0,0 +1,42 @@
1#ifndef __LINUX_SWAP_CGROUP_H
2#define __LINUX_SWAP_CGROUP_H
3
4#include <linux/swap.h>
5
6#ifdef CONFIG_MEMCG_SWAP
7
8extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
9 unsigned short old, unsigned short new);
10extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
11extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
12extern int swap_cgroup_swapon(int type, unsigned long max_pages);
13extern void swap_cgroup_swapoff(int type);
14
15#else
16
17static inline
18unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
19{
20 return 0;
21}
22
23static inline
24unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
25{
26 return 0;
27}
28
29static inline int
30swap_cgroup_swapon(int type, unsigned long max_pages)
31{
32 return 0;
33}
34
35static inline void swap_cgroup_swapoff(int type)
36{
37 return;
38}
39
40#endif /* CONFIG_MEMCG_SWAP */
41
42#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bda9b81357cc..85893d744901 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -25,7 +25,7 @@ struct linux_dirent64;
25struct list_head; 25struct list_head;
26struct mmap_arg_struct; 26struct mmap_arg_struct;
27struct msgbuf; 27struct msgbuf;
28struct msghdr; 28struct user_msghdr;
29struct mmsghdr; 29struct mmsghdr;
30struct msqid_ds; 30struct msqid_ds;
31struct new_utsname; 31struct new_utsname;
@@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
601asmlinkage long sys_send(int, void __user *, size_t, unsigned); 601asmlinkage long sys_send(int, void __user *, size_t, unsigned);
602asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, 602asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
603 struct sockaddr __user *, int); 603 struct sockaddr __user *, int);
604asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); 604asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
605asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, 605asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
606 unsigned int vlen, unsigned flags); 606 unsigned int vlen, unsigned flags);
607asmlinkage long sys_recv(int, void __user *, size_t, unsigned); 607asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
608asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, 608asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned,
609 struct sockaddr __user *, int __user *); 609 struct sockaddr __user *, int __user *);
610asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); 610asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
611asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, 611asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
612 unsigned int vlen, unsigned flags, 612 unsigned int vlen, unsigned flags,
613 struct timespec __user *timeout); 613 struct timespec __user *timeout);
@@ -877,4 +877,9 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
877asmlinkage long sys_getrandom(char __user *buf, size_t count, 877asmlinkage long sys_getrandom(char __user *buf, size_t count,
878 unsigned int flags); 878 unsigned int flags);
879asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 879asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
880
881asmlinkage long sys_execveat(int dfd, const char __user *filename,
882 const char __user *const __user *argv,
883 const char __user *const __user *envp, int flags);
884
880#endif 885#endif
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 98a3153c0f96..4b7b875a7ce1 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -49,4 +49,13 @@
49 49
50int do_syslog(int type, char __user *buf, int count, bool from_file); 50int do_syslog(int type, char __user *buf, int count, bool from_file);
51 51
52#ifdef CONFIG_PRINTK
53int check_syslog_permissions(int type, bool from_file);
54#else
55static inline int check_syslog_permissions(int type, bool from_file)
56{
57 return 0;
58}
59#endif
60
52#endif /* _LINUX_SYSLOG_H */ 61#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index c2dee7deefa8..67309ece0772 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -130,7 +130,7 @@ struct tcp_sock {
130 /* inet_connection_sock has to be the first member of tcp_sock */ 130 /* inet_connection_sock has to be the first member of tcp_sock */
131 struct inet_connection_sock inet_conn; 131 struct inet_connection_sock inet_conn;
132 u16 tcp_header_len; /* Bytes of tcp header to send */ 132 u16 tcp_header_len; /* Bytes of tcp header to send */
133 u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ 133 u16 gso_segs; /* Max number of segs per GSO packet */
134 134
135/* 135/*
136 * Header prediction flags 136 * Header prediction flags
@@ -162,7 +162,7 @@ struct tcp_sock {
162 struct { 162 struct {
163 struct sk_buff_head prequeue; 163 struct sk_buff_head prequeue;
164 struct task_struct *task; 164 struct task_struct *task;
165 struct iovec *iov; 165 struct msghdr *msg;
166 int memory; 166 int memory;
167 int len; 167 int len;
168 } ucopy; 168 } ucopy;
@@ -204,10 +204,10 @@ struct tcp_sock {
204 204
205 u16 urg_data; /* Saved octet of OOB data and control flags */ 205 u16 urg_data; /* Saved octet of OOB data and control flags */
206 u8 ecn_flags; /* ECN status bits. */ 206 u8 ecn_flags; /* ECN status bits. */
207 u8 reordering; /* Packet reordering metric. */ 207 u8 keepalive_probes; /* num of allowed keep alive probes */
208 u32 reordering; /* Packet reordering metric. */
208 u32 snd_up; /* Urgent pointer */ 209 u32 snd_up; /* Urgent pointer */
209 210
210 u8 keepalive_probes; /* num of allowed keep alive probes */
211/* 211/*
212 * Options received (usually on last packet, some only on SYN packets). 212 * Options received (usually on last packet, some only on SYN packets).
213 */ 213 */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 0305cde21a74..ef90838b36a0 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -44,6 +44,10 @@
44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ 44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10) 45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
46#define CELSIUS_TO_KELVIN(t) ((t)*10+2732) 46#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
47#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
48#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
49#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
50#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
47 51
48/* Adding event notification support elements */ 52/* Adding event notification support elements */
49#define THERMAL_GENL_FAMILY_NAME "thermal_event" 53#define THERMAL_GENL_FAMILY_NAME "thermal_event"
diff --git a/include/linux/time.h b/include/linux/time.h
index 8c42cf8d2444..203c2ad40d71 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -39,9 +39,20 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva
39 return lhs->tv_usec - rhs->tv_usec; 39 return lhs->tv_usec - rhs->tv_usec;
40} 40}
41 41
42extern unsigned long mktime(const unsigned int year, const unsigned int mon, 42extern time64_t mktime64(const unsigned int year, const unsigned int mon,
43 const unsigned int day, const unsigned int hour, 43 const unsigned int day, const unsigned int hour,
44 const unsigned int min, const unsigned int sec); 44 const unsigned int min, const unsigned int sec);
45
46/**
47 * Deprecated. Use mktime64().
48 */
49static inline unsigned long mktime(const unsigned int year,
50 const unsigned int mon, const unsigned int day,
51 const unsigned int hour, const unsigned int min,
52 const unsigned int sec)
53{
54 return mktime64(year, mon, day, hour, min, sec);
55}
45 56
46extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); 57extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
47 58
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 95640dcd1899..05af9a334893 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -42,6 +42,7 @@ struct tk_read_base {
42 * struct timekeeper - Structure holding internal timekeeping values. 42 * struct timekeeper - Structure holding internal timekeeping values.
43 * @tkr: The readout base structure 43 * @tkr: The readout base structure
44 * @xtime_sec: Current CLOCK_REALTIME time in seconds 44 * @xtime_sec: Current CLOCK_REALTIME time in seconds
45 * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
45 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset 46 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
46 * @offs_real: Offset clock monotonic -> clock realtime 47 * @offs_real: Offset clock monotonic -> clock realtime
47 * @offs_boot: Offset clock monotonic -> clock boottime 48 * @offs_boot: Offset clock monotonic -> clock boottime
@@ -77,6 +78,7 @@ struct tk_read_base {
77struct timekeeper { 78struct timekeeper {
78 struct tk_read_base tkr; 79 struct tk_read_base tkr;
79 u64 xtime_sec; 80 u64 xtime_sec;
81 unsigned long ktime_sec;
80 struct timespec64 wall_to_monotonic; 82 struct timespec64 wall_to_monotonic;
81 ktime_t offs_real; 83 ktime_t offs_real;
82 ktime_t offs_boot; 84 ktime_t offs_boot;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 1caa6b04fdc5..9b63d13ba82b 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -10,7 +10,7 @@ extern int timekeeping_suspended;
10 * Get and set timeofday 10 * Get and set timeofday
11 */ 11 */
12extern void do_gettimeofday(struct timeval *tv); 12extern void do_gettimeofday(struct timeval *tv);
13extern int do_settimeofday(const struct timespec *tv); 13extern int do_settimeofday64(const struct timespec64 *ts);
14extern int do_sys_settimeofday(const struct timespec *tv, 14extern int do_sys_settimeofday(const struct timespec *tv,
15 const struct timezone *tz); 15 const struct timezone *tz);
16 16
@@ -25,14 +25,24 @@ struct timespec __current_kernel_time(void);
25/* 25/*
26 * timespec based interfaces 26 * timespec based interfaces
27 */ 27 */
28struct timespec get_monotonic_coarse(void); 28struct timespec64 get_monotonic_coarse64(void);
29extern void getrawmonotonic(struct timespec *ts); 29extern void getrawmonotonic64(struct timespec64 *ts);
30extern void ktime_get_ts64(struct timespec64 *ts); 30extern void ktime_get_ts64(struct timespec64 *ts);
31extern time64_t ktime_get_seconds(void);
32extern time64_t ktime_get_real_seconds(void);
31 33
32extern int __getnstimeofday64(struct timespec64 *tv); 34extern int __getnstimeofday64(struct timespec64 *tv);
33extern void getnstimeofday64(struct timespec64 *tv); 35extern void getnstimeofday64(struct timespec64 *tv);
34 36
35#if BITS_PER_LONG == 64 37#if BITS_PER_LONG == 64
38/**
39 * Deprecated. Use do_settimeofday64().
40 */
41static inline int do_settimeofday(const struct timespec *ts)
42{
43 return do_settimeofday64(ts);
44}
45
36static inline int __getnstimeofday(struct timespec *ts) 46static inline int __getnstimeofday(struct timespec *ts)
37{ 47{
38 return __getnstimeofday64(ts); 48 return __getnstimeofday64(ts);
@@ -53,7 +63,27 @@ static inline void ktime_get_real_ts(struct timespec *ts)
53 getnstimeofday64(ts); 63 getnstimeofday64(ts);
54} 64}
55 65
66static inline void getrawmonotonic(struct timespec *ts)
67{
68 getrawmonotonic64(ts);
69}
70
71static inline struct timespec get_monotonic_coarse(void)
72{
73 return get_monotonic_coarse64();
74}
56#else 75#else
76/**
77 * Deprecated. Use do_settimeofday64().
78 */
79static inline int do_settimeofday(const struct timespec *ts)
80{
81 struct timespec64 ts64;
82
83 ts64 = timespec_to_timespec64(*ts);
84 return do_settimeofday64(&ts64);
85}
86
57static inline int __getnstimeofday(struct timespec *ts) 87static inline int __getnstimeofday(struct timespec *ts)
58{ 88{
59 struct timespec64 ts64; 89 struct timespec64 ts64;
@@ -86,6 +116,19 @@ static inline void ktime_get_real_ts(struct timespec *ts)
86 getnstimeofday64(&ts64); 116 getnstimeofday64(&ts64);
87 *ts = timespec64_to_timespec(ts64); 117 *ts = timespec64_to_timespec(ts64);
88} 118}
119
120static inline void getrawmonotonic(struct timespec *ts)
121{
122 struct timespec64 ts64;
123
124 getrawmonotonic64(&ts64);
125 *ts = timespec64_to_timespec(ts64);
126}
127
128static inline struct timespec get_monotonic_coarse(void)
129{
130 return timespec64_to_timespec(get_monotonic_coarse64());
131}
89#endif 132#endif
90 133
91extern void getboottime(struct timespec *ts); 134extern void getboottime(struct timespec *ts);
@@ -182,7 +225,7 @@ static inline void timekeeping_clocktai(struct timespec *ts)
182/* 225/*
183 * RTC specific 226 * RTC specific
184 */ 227 */
185extern void timekeeping_inject_sleeptime(struct timespec *delta); 228extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
186 229
187/* 230/*
188 * PPS accessor 231 * PPS accessor
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9b1581414cd4..a41e252396c0 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -31,6 +31,7 @@ struct iov_iter {
31 size_t count; 31 size_t count;
32 union { 32 union {
33 const struct iovec *iov; 33 const struct iovec *iov;
34 const struct kvec *kvec;
34 const struct bio_vec *bvec; 35 const struct bio_vec *bvec;
35 }; 36 };
36 unsigned long nr_segs; 37 unsigned long nr_segs;
@@ -82,10 +83,13 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
82 struct iov_iter *i); 83 struct iov_iter *i);
83size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); 84size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i);
84size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); 85size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
86size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
85size_t iov_iter_zero(size_t bytes, struct iov_iter *); 87size_t iov_iter_zero(size_t bytes, struct iov_iter *);
86unsigned long iov_iter_alignment(const struct iov_iter *i); 88unsigned long iov_iter_alignment(const struct iov_iter *i);
87void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, 89void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
88 unsigned long nr_segs, size_t count); 90 unsigned long nr_segs, size_t count);
91void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov,
92 unsigned long nr_segs, size_t count);
89ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, 93ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
90 size_t maxsize, unsigned maxpages, size_t *start); 94 size_t maxsize, unsigned maxpages, size_t *start);
91ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, 95ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -123,9 +127,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
123{ 127{
124 i->count = count; 128 i->count = count;
125} 129}
130size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
131size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
126 132
127int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); 133int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
128int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
129int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, 134int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
130 int offset, int len); 135 int offset, int len);
131int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, 136int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 1ad4724458de..baa81718d985 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -63,7 +63,17 @@ struct uio_port {
63 63
64#define MAX_UIO_PORT_REGIONS 5 64#define MAX_UIO_PORT_REGIONS 5
65 65
66struct uio_device; 66struct uio_device {
67 struct module *owner;
68 struct device *dev;
69 int minor;
70 atomic_t event;
71 struct fasync_struct *async_queue;
72 wait_queue_head_t wait;
73 struct uio_info *info;
74 struct kobject *map_dir;
75 struct kobject *portio_dir;
76};
67 77
68/** 78/**
69 * struct uio_info - UIO device capabilities 79 * struct uio_info - UIO device capabilities
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4f844c6b03ee..60beb5dc7977 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -98,11 +98,11 @@ struct uprobes_state {
98 struct xol_area *xol_area; 98 struct xol_area *xol_area;
99}; 99};
100 100
101extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 101extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
102extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 102extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
103extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); 103extern bool is_swbp_insn(uprobe_opcode_t *insn);
104extern bool __weak is_trap_insn(uprobe_opcode_t *insn); 104extern bool is_trap_insn(uprobe_opcode_t *insn);
105extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); 105extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
106extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); 106extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
107extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); 107extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
108extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 108extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); 130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
131extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); 131extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
132extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 132extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
133 void *src, unsigned long len); 133 void *src, unsigned long len);
134#else /* !CONFIG_UPROBES */ 134#else /* !CONFIG_UPROBES */
135struct uprobes_state { 135struct uprobes_state {
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 447a7e2fc19b..f89c24a03bd9 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
637#endif 637#endif
638 638
639/* USB autosuspend and autoresume */ 639/* USB autosuspend and autoresume */
640#ifdef CONFIG_PM_RUNTIME 640#ifdef CONFIG_PM
641extern void usb_enable_autosuspend(struct usb_device *udev); 641extern void usb_enable_autosuspend(struct usb_device *udev);
642extern void usb_disable_autosuspend(struct usb_device *udev); 642extern void usb_disable_autosuspend(struct usb_device *udev);
643 643
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index cd96a2bc3388..668898e29d0e 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -93,7 +93,7 @@ struct usb_hcd {
93 93
94 struct timer_list rh_timer; /* drives root-hub polling */ 94 struct timer_list rh_timer; /* drives root-hub polling */
95 struct urb *status_urb; /* the current status urb */ 95 struct urb *status_urb; /* the current status urb */
96#ifdef CONFIG_PM_RUNTIME 96#ifdef CONFIG_PM
97 struct work_struct wakeup_work; /* for remote wakeup */ 97 struct work_struct wakeup_work; /* for remote wakeup */
98#endif 98#endif
99 99
@@ -625,16 +625,13 @@ extern int usb_find_interface_driver(struct usb_device *dev,
625extern void usb_root_hub_lost_power(struct usb_device *rhdev); 625extern void usb_root_hub_lost_power(struct usb_device *rhdev);
626extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); 626extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
627extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); 627extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
628#endif /* CONFIG_PM */
629
630#ifdef CONFIG_PM_RUNTIME
631extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); 628extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
632#else 629#else
633static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) 630static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
634{ 631{
635 return; 632 return;
636} 633}
637#endif /* CONFIG_PM_RUNTIME */ 634#endif /* CONFIG_PM */
638 635
639/*-------------------------------------------------------------------------*/ 636/*-------------------------------------------------------------------------*/
640 637
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 26088feb6608..d9a4905e01d0 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -78,6 +78,7 @@ struct usbnet {
78# define EVENT_NO_RUNTIME_PM 9 78# define EVENT_NO_RUNTIME_PM 9
79# define EVENT_RX_KILL 10 79# define EVENT_RX_KILL 10
80# define EVENT_LINK_CHANGE 11 80# define EVENT_LINK_CHANGE 11
81# define EVENT_SET_RX_MODE 12
81}; 82};
82 83
83static inline struct usb_driver *driver_of(struct usb_interface *intf) 84static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -159,6 +160,9 @@ struct driver_info {
159 /* called by minidriver when receiving indication */ 160 /* called by minidriver when receiving indication */
160 void (*indication)(struct usbnet *dev, void *ind, int indlen); 161 void (*indication)(struct usbnet *dev, void *ind, int indlen);
161 162
163 /* rx mode change (device changes address list filtering) */
164 void (*set_rx_mode)(struct usbnet *dev);
165
162 /* for new devices, use the descriptor-reading code instead */ 166 /* for new devices, use the descriptor-reading code instead */
163 int in; /* rx endpoint */ 167 int in; /* rx endpoint */
164 int out; /* tx endpoint */ 168 int out; /* tx endpoint */
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index a4c9547aae64..f8e76e08ebe4 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -15,8 +15,6 @@
15#define _LINUX_VEXPRESS_H 15#define _LINUX_VEXPRESS_H
16 16
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/platform_device.h>
19#include <linux/reboot.h>
20#include <linux/regmap.h> 18#include <linux/regmap.h>
21 19
22#define VEXPRESS_SITE_MB 0 20#define VEXPRESS_SITE_MB 0
@@ -24,13 +22,6 @@
24#define VEXPRESS_SITE_DB2 2 22#define VEXPRESS_SITE_DB2 2
25#define VEXPRESS_SITE_MASTER 0xf 23#define VEXPRESS_SITE_MASTER 0xf
26 24
27#define VEXPRESS_RES_FUNC(_site, _func) \
28{ \
29 .start = (_site), \
30 .end = (_func), \
31 .flags = IORESOURCE_BUS, \
32}
33
34/* Config infrastructure */ 25/* Config infrastructure */
35 26
36void vexpress_config_set_master(u32 site); 27void vexpress_config_set_master(u32 site);
@@ -58,16 +49,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev);
58 49
59/* Platform control */ 50/* Platform control */
60 51
61unsigned int vexpress_get_mci_cardin(struct device *dev);
62u32 vexpress_get_procid(int site);
63void *vexpress_get_24mhz_clock_base(void);
64void vexpress_flags_set(u32 data); 52void vexpress_flags_set(u32 data);
65 53
66void vexpress_sysreg_early_init(void __iomem *base);
67int vexpress_syscfg_device_register(struct platform_device *pdev);
68
69/* Clocks */
70
71void vexpress_clk_init(void __iomem *sp810_base);
72
73#endif 54#endif
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 65261a7244fc..d09e0938fd60 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -75,6 +75,9 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
75 75
76bool virtqueue_is_broken(struct virtqueue *vq); 76bool virtqueue_is_broken(struct virtqueue *vq);
77 77
78void *virtqueue_get_avail(struct virtqueue *vq);
79void *virtqueue_get_used(struct virtqueue *vq);
80
78/** 81/**
79 * virtio_device - representation of a device using virtio 82 * virtio_device - representation of a device using virtio
80 * @index: unique position on the virtio bus 83 * @index: unique position on the virtio bus
@@ -101,11 +104,12 @@ struct virtio_device {
101 const struct virtio_config_ops *config; 104 const struct virtio_config_ops *config;
102 const struct vringh_config_ops *vringh_config; 105 const struct vringh_config_ops *vringh_config;
103 struct list_head vqs; 106 struct list_head vqs;
104 /* Note that this is a Linux set_bit-style bitmap. */ 107 u64 features;
105 unsigned long features[1];
106 void *priv; 108 void *priv;
107}; 109};
108 110
111bool virtio_device_is_legacy_only(struct virtio_device_id id);
112
109static inline struct virtio_device *dev_to_virtio(struct device *_dev) 113static inline struct virtio_device *dev_to_virtio(struct device *_dev)
110{ 114{
111 return container_of(_dev, struct virtio_device, dev); 115 return container_of(_dev, struct virtio_device, dev);
@@ -128,6 +132,8 @@ int virtio_device_restore(struct virtio_device *dev);
128 * @id_table: the ids serviced by this driver. 132 * @id_table: the ids serviced by this driver.
129 * @feature_table: an array of feature numbers supported by this driver. 133 * @feature_table: an array of feature numbers supported by this driver.
130 * @feature_table_size: number of entries in the feature table array. 134 * @feature_table_size: number of entries in the feature table array.
135 * @feature_table_legacy: same as feature_table but when working in legacy mode.
136 * @feature_table_size_legacy: number of entries in feature table legacy array.
131 * @probe: the function to call when a device is found. Returns 0 or -errno. 137 * @probe: the function to call when a device is found. Returns 0 or -errno.
132 * @remove: the function to call when a device is removed. 138 * @remove: the function to call when a device is removed.
133 * @config_changed: optional function to call when the device configuration 139 * @config_changed: optional function to call when the device configuration
@@ -138,6 +144,8 @@ struct virtio_driver {
138 const struct virtio_device_id *id_table; 144 const struct virtio_device_id *id_table;
139 const unsigned int *feature_table; 145 const unsigned int *feature_table;
140 unsigned int feature_table_size; 146 unsigned int feature_table_size;
147 const unsigned int *feature_table_legacy;
148 unsigned int feature_table_size_legacy;
141 int (*probe)(struct virtio_device *dev); 149 int (*probe)(struct virtio_device *dev);
142 void (*scan)(struct virtio_device *dev); 150 void (*scan)(struct virtio_device *dev);
143 void (*remove)(struct virtio_device *dev); 151 void (*remove)(struct virtio_device *dev);
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
new file mode 100644
index 000000000000..51865d05b267
--- /dev/null
+++ b/include/linux/virtio_byteorder.h
@@ -0,0 +1,59 @@
1#ifndef _LINUX_VIRTIO_BYTEORDER_H
2#define _LINUX_VIRTIO_BYTEORDER_H
3#include <linux/types.h>
4#include <uapi/linux/virtio_types.h>
5
6/*
7 * Low-level memory accessors for handling virtio in modern little endian and in
8 * compatibility native endian format.
9 */
10
11static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
12{
13 if (little_endian)
14 return le16_to_cpu((__force __le16)val);
15 else
16 return (__force u16)val;
17}
18
19static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
20{
21 if (little_endian)
22 return (__force __virtio16)cpu_to_le16(val);
23 else
24 return (__force __virtio16)val;
25}
26
27static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
28{
29 if (little_endian)
30 return le32_to_cpu((__force __le32)val);
31 else
32 return (__force u32)val;
33}
34
35static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
36{
37 if (little_endian)
38 return (__force __virtio32)cpu_to_le32(val);
39 else
40 return (__force __virtio32)val;
41}
42
43static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
44{
45 if (little_endian)
46 return le64_to_cpu((__force __le64)val);
47 else
48 return (__force u64)val;
49}
50
51static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
52{
53 if (little_endian)
54 return (__force __virtio64)cpu_to_le64(val);
55 else
56 return (__force __virtio64)val;
57}
58
59#endif /* _LINUX_VIRTIO_BYTEORDER */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 7f4ef66873ef..7979f850e7ac 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -4,6 +4,7 @@
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/bug.h> 5#include <linux/bug.h>
6#include <linux/virtio.h> 6#include <linux/virtio.h>
7#include <linux/virtio_byteorder.h>
7#include <uapi/linux/virtio_config.h> 8#include <uapi/linux/virtio_config.h>
8 9
9/** 10/**
@@ -46,6 +47,7 @@
46 * vdev: the virtio_device 47 * vdev: the virtio_device
47 * This gives the final feature bits for the device: it can change 48 * This gives the final feature bits for the device: it can change
48 * the dev->feature bits if it wants. 49 * the dev->feature bits if it wants.
50 * Returns 0 on success or error status
49 * @bus_name: return the bus name associated with the device 51 * @bus_name: return the bus name associated with the device
50 * vdev: the virtio_device 52 * vdev: the virtio_device
51 * This returns a pointer to the bus name a la pci_name from which 53 * This returns a pointer to the bus name a la pci_name from which
@@ -66,8 +68,8 @@ struct virtio_config_ops {
66 vq_callback_t *callbacks[], 68 vq_callback_t *callbacks[],
67 const char *names[]); 69 const char *names[]);
68 void (*del_vqs)(struct virtio_device *); 70 void (*del_vqs)(struct virtio_device *);
69 u32 (*get_features)(struct virtio_device *vdev); 71 u64 (*get_features)(struct virtio_device *vdev);
70 void (*finalize_features)(struct virtio_device *vdev); 72 int (*finalize_features)(struct virtio_device *vdev);
71 const char *(*bus_name)(struct virtio_device *vdev); 73 const char *(*bus_name)(struct virtio_device *vdev);
72 int (*set_vq_affinity)(struct virtqueue *vq, int cpu); 74 int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
73}; 75};
@@ -77,23 +79,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
77 unsigned int fbit); 79 unsigned int fbit);
78 80
79/** 81/**
80 * virtio_has_feature - helper to determine if this device has this feature. 82 * __virtio_test_bit - helper to test feature bits. For use by transports.
83 * Devices should normally use virtio_has_feature,
84 * which includes more checks.
81 * @vdev: the device 85 * @vdev: the device
82 * @fbit: the feature bit 86 * @fbit: the feature bit
83 */ 87 */
84static inline bool virtio_has_feature(const struct virtio_device *vdev, 88static inline bool __virtio_test_bit(const struct virtio_device *vdev,
89 unsigned int fbit)
90{
91 /* Did you forget to fix assumptions on max features? */
92 if (__builtin_constant_p(fbit))
93 BUILD_BUG_ON(fbit >= 64);
94 else
95 BUG_ON(fbit >= 64);
96
97 return vdev->features & BIT_ULL(fbit);
98}
99
100/**
101 * __virtio_set_bit - helper to set feature bits. For use by transports.
102 * @vdev: the device
103 * @fbit: the feature bit
104 */
105static inline void __virtio_set_bit(struct virtio_device *vdev,
106 unsigned int fbit)
107{
108 /* Did you forget to fix assumptions on max features? */
109 if (__builtin_constant_p(fbit))
110 BUILD_BUG_ON(fbit >= 64);
111 else
112 BUG_ON(fbit >= 64);
113
114 vdev->features |= BIT_ULL(fbit);
115}
116
117/**
118 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
119 * @vdev: the device
120 * @fbit: the feature bit
121 */
122static inline void __virtio_clear_bit(struct virtio_device *vdev,
85 unsigned int fbit) 123 unsigned int fbit)
86{ 124{
87 /* Did you forget to fix assumptions on max features? */ 125 /* Did you forget to fix assumptions on max features? */
88 if (__builtin_constant_p(fbit)) 126 if (__builtin_constant_p(fbit))
89 BUILD_BUG_ON(fbit >= 32); 127 BUILD_BUG_ON(fbit >= 64);
90 else 128 else
91 BUG_ON(fbit >= 32); 129 BUG_ON(fbit >= 64);
92 130
131 vdev->features &= ~BIT_ULL(fbit);
132}
133
134/**
135 * virtio_has_feature - helper to determine if this device has this feature.
136 * @vdev: the device
137 * @fbit: the feature bit
138 */
139static inline bool virtio_has_feature(const struct virtio_device *vdev,
140 unsigned int fbit)
141{
93 if (fbit < VIRTIO_TRANSPORT_F_START) 142 if (fbit < VIRTIO_TRANSPORT_F_START)
94 virtio_check_driver_offered_feature(vdev, fbit); 143 virtio_check_driver_offered_feature(vdev, fbit);
95 144
96 return test_bit(fbit, vdev->features); 145 return __virtio_test_bit(vdev, fbit);
97} 146}
98 147
99static inline 148static inline
@@ -152,6 +201,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
152 return 0; 201 return 0;
153} 202}
154 203
204/* Memory accessors */
205static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
206{
207 return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
208}
209
210static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
211{
212 return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
213}
214
215static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
216{
217 return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
218}
219
220static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
221{
222 return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
223}
224
225static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
226{
227 return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
228}
229
230static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
231{
232 return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
233}
234
155/* Config space accessors. */ 235/* Config space accessors. */
156#define virtio_cread(vdev, structname, member, ptr) \ 236#define virtio_cread(vdev, structname, member, ptr) \
157 do { \ 237 do { \
@@ -239,12 +319,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
239{ 319{
240 u16 ret; 320 u16 ret;
241 vdev->config->get(vdev, offset, &ret, sizeof(ret)); 321 vdev->config->get(vdev, offset, &ret, sizeof(ret));
242 return ret; 322 return virtio16_to_cpu(vdev, (__force __virtio16)ret);
243} 323}
244 324
245static inline void virtio_cwrite16(struct virtio_device *vdev, 325static inline void virtio_cwrite16(struct virtio_device *vdev,
246 unsigned int offset, u16 val) 326 unsigned int offset, u16 val)
247{ 327{
328 val = (__force u16)cpu_to_virtio16(vdev, val);
248 vdev->config->set(vdev, offset, &val, sizeof(val)); 329 vdev->config->set(vdev, offset, &val, sizeof(val));
249} 330}
250 331
@@ -253,12 +334,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
253{ 334{
254 u32 ret; 335 u32 ret;
255 vdev->config->get(vdev, offset, &ret, sizeof(ret)); 336 vdev->config->get(vdev, offset, &ret, sizeof(ret));
256 return ret; 337 return virtio32_to_cpu(vdev, (__force __virtio32)ret);
257} 338}
258 339
259static inline void virtio_cwrite32(struct virtio_device *vdev, 340static inline void virtio_cwrite32(struct virtio_device *vdev,
260 unsigned int offset, u32 val) 341 unsigned int offset, u32 val)
261{ 342{
343 val = (__force u32)cpu_to_virtio32(vdev, val);
262 vdev->config->set(vdev, offset, &val, sizeof(val)); 344 vdev->config->set(vdev, offset, &val, sizeof(val));
263} 345}
264 346
@@ -267,12 +349,13 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
267{ 349{
268 u64 ret; 350 u64 ret;
269 vdev->config->get(vdev, offset, &ret, sizeof(ret)); 351 vdev->config->get(vdev, offset, &ret, sizeof(ret));
270 return ret; 352 return virtio64_to_cpu(vdev, (__force __virtio64)ret);
271} 353}
272 354
273static inline void virtio_cwrite64(struct virtio_device *vdev, 355static inline void virtio_cwrite64(struct virtio_device *vdev,
274 unsigned int offset, u64 val) 356 unsigned int offset, u64 val)
275{ 357{
358 val = (__force u64)cpu_to_virtio64(vdev, val);
276 vdev->config->set(vdev, offset, &val, sizeof(val)); 359 vdev->config->set(vdev, offset, &val, sizeof(val));
277} 360}
278 361
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
deleted file mode 100644
index de429d1f4357..000000000000
--- a/include/linux/virtio_scsi.h
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * This header is BSD licensed so anyone can use the definitions to implement
3 * compatible drivers/servers.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#ifndef _LINUX_VIRTIO_SCSI_H
28#define _LINUX_VIRTIO_SCSI_H
29
30#define VIRTIO_SCSI_CDB_SIZE 32
31#define VIRTIO_SCSI_SENSE_SIZE 96
32
33/* SCSI command request, followed by data-out */
34struct virtio_scsi_cmd_req {
35 u8 lun[8]; /* Logical Unit Number */
36 u64 tag; /* Command identifier */
37 u8 task_attr; /* Task attribute */
38 u8 prio; /* SAM command priority field */
39 u8 crn;
40 u8 cdb[VIRTIO_SCSI_CDB_SIZE];
41} __packed;
42
43/* SCSI command request, followed by protection information */
44struct virtio_scsi_cmd_req_pi {
45 u8 lun[8]; /* Logical Unit Number */
46 u64 tag; /* Command identifier */
47 u8 task_attr; /* Task attribute */
48 u8 prio; /* SAM command priority field */
49 u8 crn;
50 u32 pi_bytesout; /* DataOUT PI Number of bytes */
51 u32 pi_bytesin; /* DataIN PI Number of bytes */
52 u8 cdb[VIRTIO_SCSI_CDB_SIZE];
53} __packed;
54
55/* Response, followed by sense data and data-in */
56struct virtio_scsi_cmd_resp {
57 u32 sense_len; /* Sense data length */
58 u32 resid; /* Residual bytes in data buffer */
59 u16 status_qualifier; /* Status qualifier */
60 u8 status; /* Command completion status */
61 u8 response; /* Response values */
62 u8 sense[VIRTIO_SCSI_SENSE_SIZE];
63} __packed;
64
65/* Task Management Request */
66struct virtio_scsi_ctrl_tmf_req {
67 u32 type;
68 u32 subtype;
69 u8 lun[8];
70 u64 tag;
71} __packed;
72
73struct virtio_scsi_ctrl_tmf_resp {
74 u8 response;
75} __packed;
76
77/* Asynchronous notification query/subscription */
78struct virtio_scsi_ctrl_an_req {
79 u32 type;
80 u8 lun[8];
81 u32 event_requested;
82} __packed;
83
84struct virtio_scsi_ctrl_an_resp {
85 u32 event_actual;
86 u8 response;
87} __packed;
88
89struct virtio_scsi_event {
90 u32 event;
91 u8 lun[8];
92 u32 reason;
93} __packed;
94
95struct virtio_scsi_config {
96 u32 num_queues;
97 u32 seg_max;
98 u32 max_sectors;
99 u32 cmd_per_lun;
100 u32 event_info_size;
101 u32 sense_size;
102 u32 cdb_size;
103 u16 max_channel;
104 u16 max_target;
105 u32 max_lun;
106} __packed;
107
108/* Feature Bits */
109#define VIRTIO_SCSI_F_INOUT 0
110#define VIRTIO_SCSI_F_HOTPLUG 1
111#define VIRTIO_SCSI_F_CHANGE 2
112#define VIRTIO_SCSI_F_T10_PI 3
113
114/* Response codes */
115#define VIRTIO_SCSI_S_OK 0
116#define VIRTIO_SCSI_S_OVERRUN 1
117#define VIRTIO_SCSI_S_ABORTED 2
118#define VIRTIO_SCSI_S_BAD_TARGET 3
119#define VIRTIO_SCSI_S_RESET 4
120#define VIRTIO_SCSI_S_BUSY 5
121#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
122#define VIRTIO_SCSI_S_TARGET_FAILURE 7
123#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
124#define VIRTIO_SCSI_S_FAILURE 9
125#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
126#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
127#define VIRTIO_SCSI_S_INCORRECT_LUN 12
128
129/* Controlq type codes. */
130#define VIRTIO_SCSI_T_TMF 0
131#define VIRTIO_SCSI_T_AN_QUERY 1
132#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2
133
134/* Valid TMF subtypes. */
135#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
136#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
137#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
138#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
139#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
140#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
141#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
142#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
143
144/* Events. */
145#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000
146#define VIRTIO_SCSI_T_NO_EVENT 0
147#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
148#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
149#define VIRTIO_SCSI_T_PARAM_CHANGE 3
150
151/* Reasons of transport reset event */
152#define VIRTIO_SCSI_EVT_RESET_HARD 0
153#define VIRTIO_SCSI_EVT_RESET_RESCAN 1
154#define VIRTIO_SCSI_EVT_RESET_REMOVED 2
155
156#define VIRTIO_SCSI_S_SIMPLE 0
157#define VIRTIO_SCSI_S_ORDERED 1
158#define VIRTIO_SCSI_S_HEAD 2
159#define VIRTIO_SCSI_S_ACA 3
160
161
162#endif /* _LINUX_VIRTIO_SCSI_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 730334cdf037..9246d32dc973 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -90,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
90#ifdef CONFIG_DEBUG_VM_VMACACHE 90#ifdef CONFIG_DEBUG_VM_VMACACHE
91 VMACACHE_FIND_CALLS, 91 VMACACHE_FIND_CALLS,
92 VMACACHE_FIND_HITS, 92 VMACACHE_FIND_HITS,
93 VMACACHE_FULL_FLUSHES,
93#endif 94#endif
94 NR_VM_EVENT_ITEMS 95 NR_VM_EVENT_ITEMS
95}; 96};
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 023430e265fe..5691f752ce8f 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -24,6 +24,7 @@
24#define VMCI_KERNEL_API_VERSION_2 2 24#define VMCI_KERNEL_API_VERSION_2 2
25#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 25#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
26 26
27struct msghdr;
27typedef void (vmci_device_shutdown_fn) (void *device_registration, 28typedef void (vmci_device_shutdown_fn) (void *device_registration,
28 void *user_data); 29 void *user_data);
29 30
@@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
75ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 76ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
76 void *iov, size_t iov_size, int mode); 77 void *iov, size_t iov_size, int mode);
77ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 78ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
78 void *iov, size_t iov_size, int mode); 79 struct msghdr *msg, size_t iov_size, int mode);
79ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size, 80ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
80 int mode); 81 int mode);
81 82
82#endif /* !__VMW_VMCI_API_H__ */ 83#endif /* !__VMW_VMCI_API_H__ */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index e4a8eb9312ea..2232ed16635a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -13,9 +13,12 @@ typedef struct __wait_queue wait_queue_t;
13typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); 13typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
14int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); 14int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
15 15
16/* __wait_queue::flags */
17#define WQ_FLAG_EXCLUSIVE 0x01
18#define WQ_FLAG_WOKEN 0x02
19
16struct __wait_queue { 20struct __wait_queue {
17 unsigned int flags; 21 unsigned int flags;
18#define WQ_FLAG_EXCLUSIVE 0x01
19 void *private; 22 void *private;
20 wait_queue_func_t func; 23 wait_queue_func_t func;
21 struct list_head task_list; 24 struct list_head task_list;
@@ -258,11 +261,37 @@ __out: __ret; \
258 */ 261 */
259#define wait_event(wq, condition) \ 262#define wait_event(wq, condition) \
260do { \ 263do { \
264 might_sleep(); \
261 if (condition) \ 265 if (condition) \
262 break; \ 266 break; \
263 __wait_event(wq, condition); \ 267 __wait_event(wq, condition); \
264} while (0) 268} while (0)
265 269
270#define __wait_event_freezable(wq, condition) \
271 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
272 schedule(); try_to_freeze())
273
274/**
275 * wait_event - sleep (or freeze) until a condition gets true
276 * @wq: the waitqueue to wait on
277 * @condition: a C expression for the event to wait for
278 *
279 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
280 * to system load) until the @condition evaluates to true. The
281 * @condition is checked each time the waitqueue @wq is woken up.
282 *
283 * wake_up() has to be called after changing any variable that could
284 * change the result of the wait condition.
285 */
286#define wait_event_freezable(wq, condition) \
287({ \
288 int __ret = 0; \
289 might_sleep(); \
290 if (!(condition)) \
291 __ret = __wait_event_freezable(wq, condition); \
292 __ret; \
293})
294
266#define __wait_event_timeout(wq, condition, timeout) \ 295#define __wait_event_timeout(wq, condition, timeout) \
267 ___wait_event(wq, ___wait_cond_timeout(condition), \ 296 ___wait_event(wq, ___wait_cond_timeout(condition), \
268 TASK_UNINTERRUPTIBLE, 0, timeout, \ 297 TASK_UNINTERRUPTIBLE, 0, timeout, \
@@ -290,11 +319,30 @@ do { \
290#define wait_event_timeout(wq, condition, timeout) \ 319#define wait_event_timeout(wq, condition, timeout) \
291({ \ 320({ \
292 long __ret = timeout; \ 321 long __ret = timeout; \
322 might_sleep(); \
293 if (!___wait_cond_timeout(condition)) \ 323 if (!___wait_cond_timeout(condition)) \
294 __ret = __wait_event_timeout(wq, condition, timeout); \ 324 __ret = __wait_event_timeout(wq, condition, timeout); \
295 __ret; \ 325 __ret; \
296}) 326})
297 327
328#define __wait_event_freezable_timeout(wq, condition, timeout) \
329 ___wait_event(wq, ___wait_cond_timeout(condition), \
330 TASK_INTERRUPTIBLE, 0, timeout, \
331 __ret = schedule_timeout(__ret); try_to_freeze())
332
333/*
334 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
335 * increasing load and is freezable.
336 */
337#define wait_event_freezable_timeout(wq, condition, timeout) \
338({ \
339 long __ret = timeout; \
340 might_sleep(); \
341 if (!___wait_cond_timeout(condition)) \
342 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
343 __ret; \
344})
345
298#define __wait_event_cmd(wq, condition, cmd1, cmd2) \ 346#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
299 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 347 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
300 cmd1; schedule(); cmd2) 348 cmd1; schedule(); cmd2)
@@ -315,6 +363,7 @@ do { \
315 */ 363 */
316#define wait_event_cmd(wq, condition, cmd1, cmd2) \ 364#define wait_event_cmd(wq, condition, cmd1, cmd2) \
317do { \ 365do { \
366 might_sleep(); \
318 if (condition) \ 367 if (condition) \
319 break; \ 368 break; \
320 __wait_event_cmd(wq, condition, cmd1, cmd2); \ 369 __wait_event_cmd(wq, condition, cmd1, cmd2); \
@@ -342,6 +391,7 @@ do { \
342#define wait_event_interruptible(wq, condition) \ 391#define wait_event_interruptible(wq, condition) \
343({ \ 392({ \
344 int __ret = 0; \ 393 int __ret = 0; \
394 might_sleep(); \
345 if (!(condition)) \ 395 if (!(condition)) \
346 __ret = __wait_event_interruptible(wq, condition); \ 396 __ret = __wait_event_interruptible(wq, condition); \
347 __ret; \ 397 __ret; \
@@ -375,6 +425,7 @@ do { \
375#define wait_event_interruptible_timeout(wq, condition, timeout) \ 425#define wait_event_interruptible_timeout(wq, condition, timeout) \
376({ \ 426({ \
377 long __ret = timeout; \ 427 long __ret = timeout; \
428 might_sleep(); \
378 if (!___wait_cond_timeout(condition)) \ 429 if (!___wait_cond_timeout(condition)) \
379 __ret = __wait_event_interruptible_timeout(wq, \ 430 __ret = __wait_event_interruptible_timeout(wq, \
380 condition, timeout); \ 431 condition, timeout); \
@@ -425,6 +476,7 @@ do { \
425#define wait_event_hrtimeout(wq, condition, timeout) \ 476#define wait_event_hrtimeout(wq, condition, timeout) \
426({ \ 477({ \
427 int __ret = 0; \ 478 int __ret = 0; \
479 might_sleep(); \
428 if (!(condition)) \ 480 if (!(condition)) \
429 __ret = __wait_event_hrtimeout(wq, condition, timeout, \ 481 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
430 TASK_UNINTERRUPTIBLE); \ 482 TASK_UNINTERRUPTIBLE); \
@@ -450,6 +502,7 @@ do { \
450#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ 502#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
451({ \ 503({ \
452 long __ret = 0; \ 504 long __ret = 0; \
505 might_sleep(); \
453 if (!(condition)) \ 506 if (!(condition)) \
454 __ret = __wait_event_hrtimeout(wq, condition, timeout, \ 507 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
455 TASK_INTERRUPTIBLE); \ 508 TASK_INTERRUPTIBLE); \
@@ -463,12 +516,27 @@ do { \
463#define wait_event_interruptible_exclusive(wq, condition) \ 516#define wait_event_interruptible_exclusive(wq, condition) \
464({ \ 517({ \
465 int __ret = 0; \ 518 int __ret = 0; \
519 might_sleep(); \
466 if (!(condition)) \ 520 if (!(condition)) \
467 __ret = __wait_event_interruptible_exclusive(wq, condition);\ 521 __ret = __wait_event_interruptible_exclusive(wq, condition);\
468 __ret; \ 522 __ret; \
469}) 523})
470 524
471 525
526#define __wait_event_freezable_exclusive(wq, condition) \
527 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
528 schedule(); try_to_freeze())
529
530#define wait_event_freezable_exclusive(wq, condition) \
531({ \
532 int __ret = 0; \
533 might_sleep(); \
534 if (!(condition)) \
535 __ret = __wait_event_freezable_exclusive(wq, condition);\
536 __ret; \
537})
538
539
472#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ 540#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
473({ \ 541({ \
474 int __ret = 0; \ 542 int __ret = 0; \
@@ -637,6 +705,7 @@ do { \
637#define wait_event_killable(wq, condition) \ 705#define wait_event_killable(wq, condition) \
638({ \ 706({ \
639 int __ret = 0; \ 707 int __ret = 0; \
708 might_sleep(); \
640 if (!(condition)) \ 709 if (!(condition)) \
641 __ret = __wait_event_killable(wq, condition); \ 710 __ret = __wait_event_killable(wq, condition); \
642 __ret; \ 711 __ret; \
@@ -830,6 +899,8 @@ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int sta
830long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); 899long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
831void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); 900void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
832void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); 901void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
902long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
903int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
833int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 904int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
834int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 905int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
835 906
@@ -886,6 +957,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
886static inline int 957static inline int
887wait_on_bit(void *word, int bit, unsigned mode) 958wait_on_bit(void *word, int bit, unsigned mode)
888{ 959{
960 might_sleep();
889 if (!test_bit(bit, word)) 961 if (!test_bit(bit, word))
890 return 0; 962 return 0;
891 return out_of_line_wait_on_bit(word, bit, 963 return out_of_line_wait_on_bit(word, bit,
@@ -910,6 +982,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
910static inline int 982static inline int
911wait_on_bit_io(void *word, int bit, unsigned mode) 983wait_on_bit_io(void *word, int bit, unsigned mode)
912{ 984{
985 might_sleep();
913 if (!test_bit(bit, word)) 986 if (!test_bit(bit, word))
914 return 0; 987 return 0;
915 return out_of_line_wait_on_bit(word, bit, 988 return out_of_line_wait_on_bit(word, bit,
@@ -936,6 +1009,7 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
936static inline int 1009static inline int
937wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) 1010wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
938{ 1011{
1012 might_sleep();
939 if (!test_bit(bit, word)) 1013 if (!test_bit(bit, word))
940 return 0; 1014 return 0;
941 return out_of_line_wait_on_bit(word, bit, action, mode); 1015 return out_of_line_wait_on_bit(word, bit, action, mode);
@@ -963,6 +1037,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
963static inline int 1037static inline int
964wait_on_bit_lock(void *word, int bit, unsigned mode) 1038wait_on_bit_lock(void *word, int bit, unsigned mode)
965{ 1039{
1040 might_sleep();
966 if (!test_and_set_bit(bit, word)) 1041 if (!test_and_set_bit(bit, word))
967 return 0; 1042 return 0;
968 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); 1043 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
@@ -986,6 +1061,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
986static inline int 1061static inline int
987wait_on_bit_lock_io(void *word, int bit, unsigned mode) 1062wait_on_bit_lock_io(void *word, int bit, unsigned mode)
988{ 1063{
1064 might_sleep();
989 if (!test_and_set_bit(bit, word)) 1065 if (!test_and_set_bit(bit, word))
990 return 0; 1066 return 0;
991 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); 1067 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
@@ -1011,6 +1087,7 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
1011static inline int 1087static inline int
1012wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) 1088wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
1013{ 1089{
1090 might_sleep();
1014 if (!test_and_set_bit(bit, word)) 1091 if (!test_and_set_bit(bit, word))
1015 return 0; 1092 return 0;
1016 return out_of_line_wait_on_bit_lock(word, bit, action, mode); 1093 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
@@ -1029,6 +1106,7 @@ wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned
1029static inline 1106static inline
1030int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) 1107int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1031{ 1108{
1109 might_sleep();
1032 if (atomic_read(val) == 0) 1110 if (atomic_read(val) == 0)
1033 return 0; 1111 return 0;
1034 return out_of_line_wait_on_atomic_t(val, action, mode); 1112 return out_of_line_wait_on_atomic_t(val, action, mode);
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 2a3038ee17a3..395b70e0eccf 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -97,13 +97,8 @@ struct watchdog_device {
97#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ 97#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
98}; 98};
99 99
100#ifdef CONFIG_WATCHDOG_NOWAYOUT 100#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
101#define WATCHDOG_NOWAYOUT 1 101#define WATCHDOG_NOWAYOUT_INIT_STATUS (WATCHDOG_NOWAYOUT << WDOG_NO_WAY_OUT)
102#define WATCHDOG_NOWAYOUT_INIT_STATUS (1 << WDOG_NO_WAY_OUT)
103#else
104#define WATCHDOG_NOWAYOUT 0
105#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
106#endif
107 102
108/* Use the following function to check whether or not the watchdog is active */ 103/* Use the following function to check whether or not the watchdog is active */
109static inline bool watchdog_active(struct watchdog_device *wdd) 104static inline bool watchdog_active(struct watchdog_device *wdd)