aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h10
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h3
-rw-r--r--include/linux/bpf-cgroup.h13
-rw-r--r--include/linux/clockchips.h9
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h20
-rw-r--r--include/linux/cpufreq.h7
-rw-r--r--include/linux/cpumask.h7
-rw-r--r--include/linux/cputime.h7
-rw-r--r--include/linux/delay.h11
-rw-r--r--include/linux/delayacct.h1
-rw-r--r--include/linux/devfreq.h3
-rw-r--r--include/linux/dma-iommu.h10
-rw-r--r--include/linux/dma-mapping.h7
-rw-r--r--include/linux/edac.h4
-rw-r--r--include/linux/efi-bgrt.h11
-rw-r--r--include/linux/efi.h56
-rw-r--r--include/linux/fsl_ifc.h8
-rw-r--r--include/linux/hrtimer.h11
-rw-r--r--include/linux/init_task.h40
-rw-r--r--include/linux/intel-iommu.h17
-rw-r--r--include/linux/intel_pmic_gpio.h15
-rw-r--r--include/linux/iommu.h138
-rw-r--r--include/linux/irq.h19
-rw-r--r--include/linux/irqchip/arm-gic-v3.h5
-rw-r--r--include/linux/irqdomain.h36
-rw-r--r--include/linux/jiffies.h2
-rw-r--r--include/linux/jump_label.h4
-rw-r--r--include/linux/kernel_stat.h14
-rw-r--r--include/linux/kprobes.h30
-rw-r--r--include/linux/kref.h78
-rw-r--r--include/linux/leds.h16
-rw-r--r--include/linux/llist.h37
-rw-r--r--include/linux/math64.h26
-rw-r--r--include/linux/mfd/axp20x.h31
-rw-r--r--include/linux/mfd/lpc_ich.h3
-rw-r--r--include/linux/msi.h11
-rw-r--r--include/linux/mtd/fsmc.h156
-rw-r--r--include/linux/mtd/mtd.h16
-rw-r--r--include/linux/mtd/nand.h9
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/mtd/spi-nor.h34
-rw-r--r--include/linux/mutex.h9
-rw-r--r--include/linux/of_iommu.h11
-rw-r--r--include/linux/percpu-rwsem.h8
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/platform_data/intel-spi.h31
-rw-r--r--include/linux/platform_data/spi-ep93xx.h17
-rw-r--r--include/linux/pm_domain.h3
-rw-r--r--include/linux/pm_opp.h72
-rw-r--r--include/linux/pm_qos.h9
-rw-r--r--include/linux/poison.h1
-rw-r--r--include/linux/posix-timers.h14
-rw-r--r--include/linux/power/bq27xxx_battery.h12
-rw-r--r--include/linux/pxa2xx_ssp.h14
-rw-r--r--include/linux/rcupdate.h12
-rw-r--r--include/linux/rcutiny.h6
-rw-r--r--include/linux/rcuwait.h63
-rw-r--r--include/linux/refcount.h294
-rw-r--r--include/linux/regmap.h115
-rw-r--r--include/linux/sched.h130
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_api_up.h1
-rw-r--r--include/linux/srcu.h10
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/timer.h45
-rw-r--r--include/linux/vtime.h7
-rw-r--r--include/linux/ww_mutex.h32
70 files changed, 1160 insertions, 712 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 5b36974ed60a..8e577c2cb0ce 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1153,4 +1153,14 @@ int parse_spcr(bool earlycon);
1153static inline int parse_spcr(bool earlycon) { return 0; } 1153static inline int parse_spcr(bool earlycon) { return 0; }
1154#endif 1154#endif
1155 1155
1156#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
1157int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
1158#else
1159static inline
1160int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
1161{
1162 return -EINVAL;
1163}
1164#endif
1165
1156#endif /*_LINUX_ACPI_H*/ 1166#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index b20e3d56253f..2f1c690a3e66 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -593,9 +593,6 @@ struct bcma_sflash {
593 u32 blocksize; 593 u32 blocksize;
594 u16 numblocks; 594 u16 numblocks;
595 u32 size; 595 u32 size;
596
597 struct mtd_info *mtd;
598 void *priv;
599}; 596};
600#endif 597#endif
601 598
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 92bc89ae7e20..c970a25d2a49 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -21,20 +21,19 @@ struct cgroup_bpf {
21 */ 21 */
22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; 22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; 23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
24 bool disallow_override[MAX_BPF_ATTACH_TYPE];
24}; 25};
25 26
26void cgroup_bpf_put(struct cgroup *cgrp); 27void cgroup_bpf_put(struct cgroup *cgrp);
27void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); 28void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
28 29
29void __cgroup_bpf_update(struct cgroup *cgrp, 30int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
30 struct cgroup *parent, 31 struct bpf_prog *prog, enum bpf_attach_type type,
31 struct bpf_prog *prog, 32 bool overridable);
32 enum bpf_attach_type type);
33 33
34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ 34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
35void cgroup_bpf_update(struct cgroup *cgrp, 35int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
36 struct bpf_prog *prog, 36 enum bpf_attach_type type, bool overridable);
37 enum bpf_attach_type type);
38 37
39int __cgroup_bpf_run_filter_skb(struct sock *sk, 38int __cgroup_bpf_run_filter_skb(struct sock *sk,
40 struct sk_buff *skb, 39 struct sk_buff *skb,
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0d442e34c349..5d3053c34fb3 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -224,4 +224,13 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
224 224
225#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 225#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
226 226
227#define CLOCKEVENT_OF_DECLARE(name, compat, fn) \
228 OF_DECLARE_1_RET(clkevt, name, compat, fn)
229
230#ifdef CONFIG_CLKEVT_PROBE
231extern int clockevent_probe(void);
 232#else
233static inline int clockevent_probe(void) { return 0; }
234#endif
235
227#endif /* _LINUX_CLOCKCHIPS_H */ 236#endif /* _LINUX_CLOCKCHIPS_H */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index e315d04a2fd9..cfc75848a35d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -62,6 +62,8 @@ struct module;
62 * @archdata: arch-specific data 62 * @archdata: arch-specific data
63 * @suspend: suspend function for the clocksource, if necessary 63 * @suspend: suspend function for the clocksource, if necessary
64 * @resume: resume function for the clocksource, if necessary 64 * @resume: resume function for the clocksource, if necessary
65 * @mark_unstable: Optional function to inform the clocksource driver that
66 * the watchdog marked the clocksource unstable
65 * @owner: module reference, must be set by clocksource in modules 67 * @owner: module reference, must be set by clocksource in modules
66 * 68 *
67 * Note: This struct is not used in hotpathes of the timekeeping code 69 * Note: This struct is not used in hotpathes of the timekeeping code
@@ -93,6 +95,7 @@ struct clocksource {
93 unsigned long flags; 95 unsigned long flags;
94 void (*suspend)(struct clocksource *cs); 96 void (*suspend)(struct clocksource *cs);
95 void (*resume)(struct clocksource *cs); 97 void (*resume)(struct clocksource *cs);
98 void (*mark_unstable)(struct clocksource *cs);
96 99
97 /* private: */ 100 /* private: */
98#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 101#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 63609398ef9f..9e40be522793 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -731,7 +731,25 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
731static inline bool in_compat_syscall(void) { return is_compat_task(); } 731static inline bool in_compat_syscall(void) { return is_compat_task(); }
732#endif 732#endif
733 733
734#else 734/**
735 * ns_to_compat_timeval - Compat version of ns_to_timeval
736 * @nsec: the nanoseconds value to be converted
737 *
738 * Returns the compat_timeval representation of the nsec parameter.
739 */
740static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
741{
742 struct timeval tv;
743 struct compat_timeval ctv;
744
745 tv = ns_to_timeval(nsec);
746 ctv.tv_sec = tv.tv_sec;
747 ctv.tv_usec = tv.tv_usec;
748
749 return ctv;
750}
751
752#else /* !CONFIG_COMPAT */
735 753
736#define is_compat_task() (0) 754#define is_compat_task() (0)
737static inline bool in_compat_syscall(void) { return false; } 755static inline bool in_compat_syscall(void) { return false; }
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7e05c5e4e45c..87165f06a307 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -31,7 +31,7 @@
31 31
32#define CPUFREQ_ETERNAL (-1) 32#define CPUFREQ_ETERNAL (-1)
33#define CPUFREQ_NAME_LEN 16 33#define CPUFREQ_NAME_LEN 16
34/* Print length for names. Extra 1 space for accomodating '\n' in prints */ 34/* Print length for names. Extra 1 space for accommodating '\n' in prints */
35#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) 35#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
36 36
37struct cpufreq_governor; 37struct cpufreq_governor;
@@ -115,7 +115,7 @@ struct cpufreq_policy {
115 * guarantee that frequency can be changed on any CPU sharing the 115 * guarantee that frequency can be changed on any CPU sharing the
116 * policy and that the change will affect all of the policy CPUs then. 116 * policy and that the change will affect all of the policy CPUs then.
117 * - fast_switch_enabled is to be set by governors that support fast 117 * - fast_switch_enabled is to be set by governors that support fast
118 * freqnency switching with the help of cpufreq_enable_fast_switch(). 118 * frequency switching with the help of cpufreq_enable_fast_switch().
119 */ 119 */
120 bool fast_switch_possible; 120 bool fast_switch_possible;
121 bool fast_switch_enabled; 121 bool fast_switch_enabled;
@@ -415,9 +415,6 @@ static inline void cpufreq_resume(void) {}
415/* Policy Notifiers */ 415/* Policy Notifiers */
416#define CPUFREQ_ADJUST (0) 416#define CPUFREQ_ADJUST (0)
417#define CPUFREQ_NOTIFY (1) 417#define CPUFREQ_NOTIFY (1)
418#define CPUFREQ_START (2)
419#define CPUFREQ_CREATE_POLICY (3)
420#define CPUFREQ_REMOVE_POLICY (4)
421 418
422#ifdef CONFIG_CPU_FREQ 419#ifdef CONFIG_CPU_FREQ
423int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 420int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b3d2c1a89ac4..96f1e88b767c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -649,11 +649,15 @@ static inline size_t cpumask_size(void)
649 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use 649 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
650 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the 650 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
651 * other type of cpumask_var_t implementation is configured. 651 * other type of cpumask_var_t implementation is configured.
652 *
653 * Please also note that __cpumask_var_read_mostly can be used to declare
654 * a cpumask_var_t variable itself (not its content) as read mostly.
652 */ 655 */
653#ifdef CONFIG_CPUMASK_OFFSTACK 656#ifdef CONFIG_CPUMASK_OFFSTACK
654typedef struct cpumask *cpumask_var_t; 657typedef struct cpumask *cpumask_var_t;
655 658
656#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) 659#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
660#define __cpumask_var_read_mostly __read_mostly
657 661
658bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 662bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
659bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 663bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
@@ -667,6 +671,7 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
667typedef struct cpumask cpumask_var_t[1]; 671typedef struct cpumask cpumask_var_t[1];
668 672
669#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) 673#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
674#define __cpumask_var_read_mostly
670 675
671static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 676static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
672{ 677{
diff --git a/include/linux/cputime.h b/include/linux/cputime.h
index f2eb2ee535ca..a691dc4ddc13 100644
--- a/include/linux/cputime.h
+++ b/include/linux/cputime.h
@@ -1,6 +1,7 @@
1#ifndef __LINUX_CPUTIME_H 1#ifndef __LINUX_CPUTIME_H
2#define __LINUX_CPUTIME_H 2#define __LINUX_CPUTIME_H
3 3
4#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
4#include <asm/cputime.h> 5#include <asm/cputime.h>
5 6
6#ifndef cputime_to_nsecs 7#ifndef cputime_to_nsecs
@@ -8,9 +9,5 @@
8 (cputime_to_usecs(__ct) * NSEC_PER_USEC) 9 (cputime_to_usecs(__ct) * NSEC_PER_USEC)
9#endif 10#endif
10 11
11#ifndef nsecs_to_cputime 12#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
12# define nsecs_to_cputime(__nsecs) \
13 usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
14#endif
15
16#endif /* __LINUX_CPUTIME_H */ 13#endif /* __LINUX_CPUTIME_H */
diff --git a/include/linux/delay.h b/include/linux/delay.h
index a6ecb34cf547..2ecb3c46b20a 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -5,6 +5,17 @@
5 * Copyright (C) 1993 Linus Torvalds 5 * Copyright (C) 1993 Linus Torvalds
6 * 6 *
7 * Delay routines, using a pre-computed "loops_per_jiffy" value. 7 * Delay routines, using a pre-computed "loops_per_jiffy" value.
8 *
9 * Please note that ndelay(), udelay() and mdelay() may return early for
10 * several reasons:
11 * 1. computed loops_per_jiffy too low (due to the time taken to
12 * execute the timer interrupt.)
13 * 2. cache behaviour affecting the time it takes to execute the
14 * loop function.
15 * 3. CPU clock rate changes.
16 *
17 * Please see this thread:
18 * http://lists.openwall.net/linux-kernel/2011/01/09/56
8 */ 19 */
9 20
10#include <linux/kernel.h> 21#include <linux/kernel.h>
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6cee17c22313..00e60f79a9cc 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -17,6 +17,7 @@
17#ifndef _LINUX_DELAYACCT_H 17#ifndef _LINUX_DELAYACCT_H
18#define _LINUX_DELAYACCT_H 18#define _LINUX_DELAYACCT_H
19 19
20#include <uapi/linux/taskstats.h>
20#include <linux/sched.h> 21#include <linux/sched.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22 23
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 2de4e2eea180..e0acb0e5243b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -104,6 +104,8 @@ struct devfreq_dev_profile {
104 * struct devfreq_governor - Devfreq policy governor 104 * struct devfreq_governor - Devfreq policy governor
105 * @node: list node - contains registered devfreq governors 105 * @node: list node - contains registered devfreq governors
106 * @name: Governor's name 106 * @name: Governor's name
107 * @immutable: Immutable flag for governor. If the value is 1,
108 * this govenror is never changeable to other governor.
107 * @get_target_freq: Returns desired operating frequency for the device. 109 * @get_target_freq: Returns desired operating frequency for the device.
108 * Basically, get_target_freq will run 110 * Basically, get_target_freq will run
109 * devfreq_dev_profile.get_dev_status() to get the 111 * devfreq_dev_profile.get_dev_status() to get the
@@ -121,6 +123,7 @@ struct devfreq_governor {
121 struct list_head node; 123 struct list_head node;
122 124
123 const char name[DEVFREQ_NAME_LEN]; 125 const char name[DEVFREQ_NAME_LEN];
126 const unsigned int immutable;
124 int (*get_target_freq)(struct devfreq *this, unsigned long *freq); 127 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
125 int (*event_handler)(struct devfreq *devfreq, 128 int (*event_handler)(struct devfreq *devfreq,
126 unsigned int event, void *data); 129 unsigned int event, void *data);
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7e3839..5725c94b1f12 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
27 27
28/* Domain management interface for IOMMU drivers */ 28/* Domain management interface for IOMMU drivers */
29int iommu_get_dma_cookie(struct iommu_domain *domain); 29int iommu_get_dma_cookie(struct iommu_domain *domain);
30int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
30void iommu_put_dma_cookie(struct iommu_domain *domain); 31void iommu_put_dma_cookie(struct iommu_domain *domain);
31 32
32/* Setup call for arch DMA mapping code */ 33/* Setup call for arch DMA mapping code */
@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
34 u64 size, struct device *dev); 35 u64 size, struct device *dev);
35 36
36/* General helpers for DMA-API <-> IOMMU-API interaction */ 37/* General helpers for DMA-API <-> IOMMU-API interaction */
37int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); 38int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
39 unsigned long attrs);
38 40
39/* 41/*
40 * These implement the bulk of the relevant DMA mapping callbacks, but require 42 * These implement the bulk of the relevant DMA mapping callbacks, but require
@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
65 size_t size, enum dma_data_direction dir, unsigned long attrs); 67 size_t size, enum dma_data_direction dir, unsigned long attrs);
66void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, 68void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
67 size_t size, enum dma_data_direction dir, unsigned long attrs); 69 size_t size, enum dma_data_direction dir, unsigned long attrs);
68int iommu_dma_supported(struct device *dev, u64 mask);
69int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 70int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
70 71
71/* The DMA API isn't _quite_ the whole story, though... */ 72/* The DMA API isn't _quite_ the whole story, though... */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
86 return -ENODEV; 87 return -ENODEV;
87} 88}
88 89
90static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
91{
92 return -ENODEV;
93}
94
89static inline void iommu_put_dma_cookie(struct iommu_domain *domain) 95static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
90{ 96{
91} 97}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 10c5a17b1f51..c24721a33b4c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -63,6 +63,13 @@
63#define DMA_ATTR_NO_WARN (1UL << 8) 63#define DMA_ATTR_NO_WARN (1UL << 8)
64 64
65/* 65/*
66 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
67 * accessible at an elevated privilege level (and ideally inaccessible or
68 * at least read-only at lesser-privileged levels).
69 */
70#define DMA_ATTR_PRIVILEGED (1UL << 9)
71
72/*
66 * A dma_addr_t can hold any valid DMA or bus address for the platform. 73 * A dma_addr_t can hold any valid DMA or bus address for the platform.
67 * It can be given to a device to use as a DMA source or target. A CPU cannot 74 * It can be given to a device to use as a DMA source or target. A CPU cannot
68 * reference a dma_addr_t directly because there may be translation between 75 * reference a dma_addr_t directly because there may be translation between
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 07c52c0af62d..5b6adf964248 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -190,8 +190,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
190 * part of the memory details to the memory controller. 190 * part of the memory details to the memory controller.
191 * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. 191 * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers.
192 * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. 192 * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F.
193 * Those memories are labed as "PC2-" instead of "PC" to 193 * Those memories are labeled as "PC2-" instead of "PC" to
194 * differenciate from DDR. 194 * differentiate from DDR.
195 * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 195 * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205
196 * and JESD206. 196 * and JESD206.
197 * Those memories are accessed per DIMM slot, and not by 197 * Those memories are accessed per DIMM slot, and not by
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index 051b21fedf68..2fd3993c370b 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -1,20 +1,19 @@
1#ifndef _LINUX_EFI_BGRT_H 1#ifndef _LINUX_EFI_BGRT_H
2#define _LINUX_EFI_BGRT_H 2#define _LINUX_EFI_BGRT_H
3 3
4#ifdef CONFIG_ACPI_BGRT
5
6#include <linux/acpi.h> 4#include <linux/acpi.h>
7 5
8void efi_bgrt_init(void); 6#ifdef CONFIG_ACPI_BGRT
7
8void efi_bgrt_init(struct acpi_table_header *table);
9 9
10/* The BGRT data itself; only valid if bgrt_image != NULL. */ 10/* The BGRT data itself; only valid if bgrt_image != NULL. */
11extern void *bgrt_image;
12extern size_t bgrt_image_size; 11extern size_t bgrt_image_size;
13extern struct acpi_table_bgrt *bgrt_tab; 12extern struct acpi_table_bgrt bgrt_tab;
14 13
15#else /* !CONFIG_ACPI_BGRT */ 14#else /* !CONFIG_ACPI_BGRT */
16 15
17static inline void efi_bgrt_init(void) {} 16static inline void efi_bgrt_init(struct acpi_table_header *table) {}
18 17
19#endif /* !CONFIG_ACPI_BGRT */ 18#endif /* !CONFIG_ACPI_BGRT */
20 19
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5b1af30ece55..94d34e0be24f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -509,24 +509,6 @@ typedef struct {
509 u64 query_variable_info; 509 u64 query_variable_info;
510} efi_runtime_services_64_t; 510} efi_runtime_services_64_t;
511 511
512typedef struct {
513 efi_table_hdr_t hdr;
514 void *get_time;
515 void *set_time;
516 void *get_wakeup_time;
517 void *set_wakeup_time;
518 void *set_virtual_address_map;
519 void *convert_pointer;
520 void *get_variable;
521 void *get_next_variable;
522 void *set_variable;
523 void *get_next_high_mono_count;
524 void *reset_system;
525 void *update_capsule;
526 void *query_capsule_caps;
527 void *query_variable_info;
528} efi_runtime_services_t;
529
530typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc); 512typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
531typedef efi_status_t efi_set_time_t (efi_time_t *tm); 513typedef efi_status_t efi_set_time_t (efi_time_t *tm);
532typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending, 514typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -561,6 +543,24 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
561 unsigned long size, 543 unsigned long size,
562 bool nonblocking); 544 bool nonblocking);
563 545
546typedef struct {
547 efi_table_hdr_t hdr;
548 efi_get_time_t *get_time;
549 efi_set_time_t *set_time;
550 efi_get_wakeup_time_t *get_wakeup_time;
551 efi_set_wakeup_time_t *set_wakeup_time;
552 efi_set_virtual_address_map_t *set_virtual_address_map;
553 void *convert_pointer;
554 efi_get_variable_t *get_variable;
555 efi_get_next_variable_t *get_next_variable;
556 efi_set_variable_t *set_variable;
557 efi_get_next_high_mono_count_t *get_next_high_mono_count;
558 efi_reset_system_t *reset_system;
559 efi_update_capsule_t *update_capsule;
560 efi_query_capsule_caps_t *query_capsule_caps;
561 efi_query_variable_info_t *query_variable_info;
562} efi_runtime_services_t;
563
564void efi_native_runtime_setup(void); 564void efi_native_runtime_setup(void);
565 565
566/* 566/*
@@ -611,6 +611,9 @@ void efi_native_runtime_setup(void);
611#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) 611#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
612#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) 612#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
613 613
614#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
615#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
616
614/* 617/*
615 * This GUID is used to pass to the kernel proper the struct screen_info 618 * This GUID is used to pass to the kernel proper the struct screen_info
616 * structure that was populated by the stub based on the GOP protocol instance 619 * structure that was populated by the stub based on the GOP protocol instance
@@ -1065,6 +1068,7 @@ extern int __init efi_setup_pcdp_console(char *);
1065#define EFI_ARCH_1 7 /* First arch-specific bit */ 1068#define EFI_ARCH_1 7 /* First arch-specific bit */
1066#define EFI_DBG 8 /* Print additional debug info at runtime */ 1069#define EFI_DBG 8 /* Print additional debug info at runtime */
1067#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ 1070#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
1071#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
1068 1072
1069#ifdef CONFIG_EFI 1073#ifdef CONFIG_EFI
1070/* 1074/*
@@ -1240,17 +1244,17 @@ struct efivar_entry {
1240 bool deleting; 1244 bool deleting;
1241}; 1245};
1242 1246
1243struct efi_simple_text_output_protocol_32 { 1247typedef struct {
1244 u32 reset; 1248 u32 reset;
1245 u32 output_string; 1249 u32 output_string;
1246 u32 test_string; 1250 u32 test_string;
1247}; 1251} efi_simple_text_output_protocol_32_t;
1248 1252
1249struct efi_simple_text_output_protocol_64 { 1253typedef struct {
1250 u64 reset; 1254 u64 reset;
1251 u64 output_string; 1255 u64 output_string;
1252 u64 test_string; 1256 u64 test_string;
1253}; 1257} efi_simple_text_output_protocol_64_t;
1254 1258
1255struct efi_simple_text_output_protocol { 1259struct efi_simple_text_output_protocol {
1256 void *reset; 1260 void *reset;
@@ -1476,6 +1480,14 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
1476bool efi_runtime_disabled(void); 1480bool efi_runtime_disabled(void);
1477extern void efi_call_virt_check_flags(unsigned long flags, const char *call); 1481extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
1478 1482
1483enum efi_secureboot_mode {
1484 efi_secureboot_mode_unset,
1485 efi_secureboot_mode_unknown,
1486 efi_secureboot_mode_disabled,
1487 efi_secureboot_mode_enabled,
1488};
1489enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
1490
1479/* 1491/*
1480 * Arch code can implement the following three template macros, avoiding 1492 * Arch code can implement the following three template macros, avoiding
1481 * reptition for the void/non-void return cases of {__,}efi_call_virt(): 1493 * reptition for the void/non-void return cases of {__,}efi_call_virt():
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 3f9778cbc79d..c332f0a45607 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -733,8 +733,12 @@ struct fsl_ifc_nand {
733 __be32 nand_erattr1; 733 __be32 nand_erattr1;
734 u32 res19[0x10]; 734 u32 res19[0x10];
735 __be32 nand_fsr; 735 __be32 nand_fsr;
736 u32 res20[0x3]; 736 u32 res20;
737 __be32 nand_eccstat[6]; 737 /* The V1 nand_eccstat is actually 4 words that overlaps the
738 * V2 nand_eccstat.
739 */
740 __be32 v1_nand_eccstat[2];
741 __be32 v2_nand_eccstat[6];
738 u32 res21[0x1c]; 742 u32 res21[0x1c];
739 __be32 nanndcr; 743 __be32 nanndcr;
740 u32 res22[0x2]; 744 u32 res22[0x2];
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index cdab81ba29f8..e52b427223ba 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -88,12 +88,6 @@ enum hrtimer_restart {
88 * @base: pointer to the timer base (per cpu and per clock) 88 * @base: pointer to the timer base (per cpu and per clock)
89 * @state: state information (See bit values above) 89 * @state: state information (See bit values above)
90 * @is_rel: Set if the timer was armed relative 90 * @is_rel: Set if the timer was armed relative
91 * @start_pid: timer statistics field to store the pid of the task which
92 * started the timer
93 * @start_site: timer statistics field to store the site where the timer
94 * was started
95 * @start_comm: timer statistics field to store the name of the process which
96 * started the timer
97 * 91 *
98 * The hrtimer structure must be initialized by hrtimer_init() 92 * The hrtimer structure must be initialized by hrtimer_init()
99 */ 93 */
@@ -104,11 +98,6 @@ struct hrtimer {
104 struct hrtimer_clock_base *base; 98 struct hrtimer_clock_base *base;
105 u8 state; 99 u8 state;
106 u8 is_rel; 100 u8 is_rel;
107#ifdef CONFIG_TIMER_STATS
108 int start_pid;
109 void *start_site;
110 char start_comm[16];
111#endif
112}; 101};
113 102
114/** 103/**
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 325f649d77ff..3a85d61f7614 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -42,6 +42,27 @@ extern struct fs_struct init_fs;
42#define INIT_PREV_CPUTIME(x) 42#define INIT_PREV_CPUTIME(x)
43#endif 43#endif
44 44
45#ifdef CONFIG_POSIX_TIMERS
46#define INIT_POSIX_TIMERS(s) \
47 .posix_timers = LIST_HEAD_INIT(s.posix_timers),
48#define INIT_CPU_TIMERS(s) \
49 .cpu_timers = { \
50 LIST_HEAD_INIT(s.cpu_timers[0]), \
51 LIST_HEAD_INIT(s.cpu_timers[1]), \
52 LIST_HEAD_INIT(s.cpu_timers[2]), \
53 },
54#define INIT_CPUTIMER(s) \
55 .cputimer = { \
56 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
57 .running = false, \
58 .checking_timer = false, \
59 },
60#else
61#define INIT_POSIX_TIMERS(s)
62#define INIT_CPU_TIMERS(s)
63#define INIT_CPUTIMER(s)
64#endif
65
45#define INIT_SIGNALS(sig) { \ 66#define INIT_SIGNALS(sig) { \
46 .nr_threads = 1, \ 67 .nr_threads = 1, \
47 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \ 68 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -49,14 +70,10 @@ extern struct fs_struct init_fs;
49 .shared_pending = { \ 70 .shared_pending = { \
50 .list = LIST_HEAD_INIT(sig.shared_pending.list), \ 71 .list = LIST_HEAD_INIT(sig.shared_pending.list), \
51 .signal = {{0}}}, \ 72 .signal = {{0}}}, \
52 .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ 73 INIT_POSIX_TIMERS(sig) \
53 .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ 74 INIT_CPU_TIMERS(sig) \
54 .rlim = INIT_RLIMITS, \ 75 .rlim = INIT_RLIMITS, \
55 .cputimer = { \ 76 INIT_CPUTIMER(sig) \
56 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
57 .running = false, \
58 .checking_timer = false, \
59 }, \
60 INIT_PREV_CPUTIME(sig) \ 77 INIT_PREV_CPUTIME(sig) \
61 .cred_guard_mutex = \ 78 .cred_guard_mutex = \
62 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 79 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
@@ -247,7 +264,7 @@ extern struct task_group root_task_group;
247 .blocked = {{0}}, \ 264 .blocked = {{0}}, \
248 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ 265 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
249 .journal_info = NULL, \ 266 .journal_info = NULL, \
250 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 267 INIT_CPU_TIMERS(tsk) \
251 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ 268 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
252 .timer_slack_ns = 50000, /* 50 usec default slack */ \ 269 .timer_slack_ns = 50000, /* 50 usec default slack */ \
253 .pids = { \ 270 .pids = { \
@@ -274,13 +291,6 @@ extern struct task_group root_task_group;
274} 291}
275 292
276 293
277#define INIT_CPU_TIMERS(cpu_timers) \
278{ \
279 LIST_HEAD_INIT(cpu_timers[0]), \
280 LIST_HEAD_INIT(cpu_timers[1]), \
281 LIST_HEAD_INIT(cpu_timers[2]), \
282}
283
284/* Attach to the init_task data structure for proper alignment */ 294/* Attach to the init_task data structure for proper alignment */
285#define __init_task_data __attribute__((__section__(".data..init_task"))) 295#define __init_task_data __attribute__((__section__(".data..init_task")))
286 296
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d49e26c6cdc7..c573a52ae440 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
29#include <linux/dma_remapping.h> 29#include <linux/dma_remapping.h>
30#include <linux/mmu_notifier.h> 30#include <linux/mmu_notifier.h>
31#include <linux/list.h> 31#include <linux/list.h>
32#include <linux/iommu.h>
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/iommu.h> 34#include <asm/iommu.h>
34 35
@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
153#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) 154#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
154#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) 155#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
155#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) 156#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
156#define DMA_TLB_IIRG(type) ((type >> 60) & 7) 157#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
157#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) 158#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
158#define DMA_TLB_READ_DRAIN (((u64)1) << 49) 159#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
159#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) 160#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
160#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) 161#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
164 165
165/* INVALID_DESC */ 166/* INVALID_DESC */
166#define DMA_CCMD_INVL_GRANU_OFFSET 61 167#define DMA_CCMD_INVL_GRANU_OFFSET 61
167#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) 168#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
168#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) 169#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
169#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) 170#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
170#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) 171#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
171#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) 172#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
172#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) 173#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
@@ -316,8 +317,8 @@ enum {
316#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) 317#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
317#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) 318#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
318#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) 319#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
319#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) 320#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
320#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) 321#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
321#define QI_DEV_EIOTLB_MAX_INVS 32 322#define QI_DEV_EIOTLB_MAX_INVS 32
322 323
323#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) 324#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@@ -439,7 +440,7 @@ struct intel_iommu {
439 struct irq_domain *ir_domain; 440 struct irq_domain *ir_domain;
440 struct irq_domain *ir_msi_domain; 441 struct irq_domain *ir_msi_domain;
441#endif 442#endif
442 struct device *iommu_dev; /* IOMMU-sysfs device */ 443 struct iommu_device iommu; /* IOMMU core code handle */
443 int node; 444 int node;
444 u32 flags; /* Software defined flags */ 445 u32 flags; /* Software defined flags */
445}; 446};
diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h
deleted file mode 100644
index 920109a29191..000000000000
--- a/include/linux/intel_pmic_gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef LINUX_INTEL_PMIC_H
2#define LINUX_INTEL_PMIC_H
3
4struct intel_pmic_gpio_platform_data {
5 /* the first IRQ of the chip */
6 unsigned irq_base;
7 /* number assigned to the first GPIO */
8 unsigned gpio_base;
9 /* sram address for gpiointr register, the langwell chip will map
10 * the PMIC spi GPIO expander's GPIOINTR register in sram.
11 */
12 unsigned gpiointr;
13};
14
15#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0ff5111f6959..6a6de187ddc0 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,6 +31,13 @@
31#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ 31#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
32#define IOMMU_NOEXEC (1 << 3) 32#define IOMMU_NOEXEC (1 << 3)
33#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ 33#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
34/*
35 * This is to make the IOMMU API setup privileged
36 * mapppings accessible by the master only at higher
37 * privileged execution level and inaccessible at
38 * less privileged levels.
39 */
40#define IOMMU_PRIV (1 << 5)
34 41
35struct iommu_ops; 42struct iommu_ops;
36struct iommu_group; 43struct iommu_group;
@@ -117,18 +124,25 @@ enum iommu_attr {
117 DOMAIN_ATTR_MAX, 124 DOMAIN_ATTR_MAX,
118}; 125};
119 126
127/* These are the possible reserved region types */
128#define IOMMU_RESV_DIRECT (1 << 0)
129#define IOMMU_RESV_RESERVED (1 << 1)
130#define IOMMU_RESV_MSI (1 << 2)
131
120/** 132/**
121 * struct iommu_dm_region - descriptor for a direct mapped memory region 133 * struct iommu_resv_region - descriptor for a reserved memory region
122 * @list: Linked list pointers 134 * @list: Linked list pointers
123 * @start: System physical start address of the region 135 * @start: System physical start address of the region
124 * @length: Length of the region in bytes 136 * @length: Length of the region in bytes
125 * @prot: IOMMU Protection flags (READ/WRITE/...) 137 * @prot: IOMMU Protection flags (READ/WRITE/...)
138 * @type: Type of the reserved region
126 */ 139 */
127struct iommu_dm_region { 140struct iommu_resv_region {
128 struct list_head list; 141 struct list_head list;
129 phys_addr_t start; 142 phys_addr_t start;
130 size_t length; 143 size_t length;
131 int prot; 144 int prot;
145 int type;
132}; 146};
133 147
134#ifdef CONFIG_IOMMU_API 148#ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
150 * @device_group: find iommu group for a particular device 164 * @device_group: find iommu group for a particular device
151 * @domain_get_attr: Query domain attributes 165 * @domain_get_attr: Query domain attributes
152 * @domain_set_attr: Change domain attributes 166 * @domain_set_attr: Change domain attributes
153 * @get_dm_regions: Request list of direct mapping requirements for a device 167 * @get_resv_regions: Request list of reserved regions for a device
154 * @put_dm_regions: Free list of direct mapping requirements for a device 168 * @put_resv_regions: Free list of reserved regions for a device
155 * @apply_dm_region: Temporary helper call-back for iova reserved ranges 169 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
156 * @domain_window_enable: Configure and enable a particular window for a domain 170 * @domain_window_enable: Configure and enable a particular window for a domain
157 * @domain_window_disable: Disable a particular window for a domain 171 * @domain_window_disable: Disable a particular window for a domain
158 * @domain_set_windows: Set the number of windows for a domain 172 * @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
184 int (*domain_set_attr)(struct iommu_domain *domain, 198 int (*domain_set_attr)(struct iommu_domain *domain,
185 enum iommu_attr attr, void *data); 199 enum iommu_attr attr, void *data);
186 200
187 /* Request/Free a list of direct mapping requirements for a device */ 201 /* Request/Free a list of reserved regions for a device */
188 void (*get_dm_regions)(struct device *dev, struct list_head *list); 202 void (*get_resv_regions)(struct device *dev, struct list_head *list);
189 void (*put_dm_regions)(struct device *dev, struct list_head *list); 203 void (*put_resv_regions)(struct device *dev, struct list_head *list);
190 void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, 204 void (*apply_resv_region)(struct device *dev,
191 struct iommu_dm_region *region); 205 struct iommu_domain *domain,
206 struct iommu_resv_region *region);
192 207
193 /* Window handling functions */ 208 /* Window handling functions */
194 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, 209 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -204,6 +219,42 @@ struct iommu_ops {
204 unsigned long pgsize_bitmap; 219 unsigned long pgsize_bitmap;
205}; 220};
206 221
222/**
223 * struct iommu_device - IOMMU core representation of one IOMMU hardware
224 * instance
225 * @list: Used by the iommu-core to keep a list of registered iommus
226 * @ops: iommu-ops for talking to this iommu
227 * @dev: struct device for sysfs handling
228 */
229struct iommu_device {
230 struct list_head list;
231 const struct iommu_ops *ops;
232 struct fwnode_handle *fwnode;
233 struct device dev;
234};
235
236int iommu_device_register(struct iommu_device *iommu);
237void iommu_device_unregister(struct iommu_device *iommu);
238int iommu_device_sysfs_add(struct iommu_device *iommu,
239 struct device *parent,
240 const struct attribute_group **groups,
241 const char *fmt, ...) __printf(4, 5);
242void iommu_device_sysfs_remove(struct iommu_device *iommu);
243int iommu_device_link(struct iommu_device *iommu, struct device *link);
244void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
245
246static inline void iommu_device_set_ops(struct iommu_device *iommu,
247 const struct iommu_ops *ops)
248{
249 iommu->ops = ops;
250}
251
252static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
253 struct fwnode_handle *fwnode)
254{
255 iommu->fwnode = fwnode;
256}
257
207#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ 258#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
208#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ 259#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
209#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ 260#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
233extern void iommu_set_fault_handler(struct iommu_domain *domain, 284extern void iommu_set_fault_handler(struct iommu_domain *domain,
234 iommu_fault_handler_t handler, void *token); 285 iommu_fault_handler_t handler, void *token);
235 286
236extern void iommu_get_dm_regions(struct device *dev, struct list_head *list); 287extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
237extern void iommu_put_dm_regions(struct device *dev, struct list_head *list); 288extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
238extern int iommu_request_dm_for_dev(struct device *dev); 289extern int iommu_request_dm_for_dev(struct device *dev);
290extern struct iommu_resv_region *
291iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
292extern int iommu_get_group_resv_regions(struct iommu_group *group,
293 struct list_head *head);
239 294
240extern int iommu_attach_group(struct iommu_domain *domain, 295extern int iommu_attach_group(struct iommu_domain *domain,
241 struct iommu_group *group); 296 struct iommu_group *group);
@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
267 void *data); 322 void *data);
268extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, 323extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
269 void *data); 324 void *data);
270struct device *iommu_device_create(struct device *parent, void *drvdata,
271 const struct attribute_group **groups,
272 const char *fmt, ...) __printf(4, 5);
273void iommu_device_destroy(struct device *dev);
274int iommu_device_link(struct device *dev, struct device *link);
275void iommu_device_unlink(struct device *dev, struct device *link);
276 325
277/* Window handling function prototypes */ 326/* Window handling function prototypes */
278extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, 327extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
352 const struct iommu_ops *ops); 401 const struct iommu_ops *ops);
353void iommu_fwspec_free(struct device *dev); 402void iommu_fwspec_free(struct device *dev);
354int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); 403int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
355void iommu_register_instance(struct fwnode_handle *fwnode, 404const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
356 const struct iommu_ops *ops);
357const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
358 405
359#else /* CONFIG_IOMMU_API */ 406#else /* CONFIG_IOMMU_API */
360 407
361struct iommu_ops {}; 408struct iommu_ops {};
362struct iommu_group {}; 409struct iommu_group {};
363struct iommu_fwspec {}; 410struct iommu_fwspec {};
411struct iommu_device {};
364 412
365static inline bool iommu_present(struct bus_type *bus) 413static inline bool iommu_present(struct bus_type *bus)
366{ 414{
@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
443{ 491{
444} 492}
445 493
446static inline void iommu_get_dm_regions(struct device *dev, 494static inline void iommu_get_resv_regions(struct device *dev,
447 struct list_head *list) 495 struct list_head *list)
448{ 496{
449} 497}
450 498
451static inline void iommu_put_dm_regions(struct device *dev, 499static inline void iommu_put_resv_regions(struct device *dev,
452 struct list_head *list) 500 struct list_head *list)
453{ 501{
454} 502}
455 503
504static inline int iommu_get_group_resv_regions(struct iommu_group *group,
505 struct list_head *head)
506{
507 return -ENODEV;
508}
509
456static inline int iommu_request_dm_for_dev(struct device *dev) 510static inline int iommu_request_dm_for_dev(struct device *dev)
457{ 511{
458 return -ENODEV; 512 return -ENODEV;
@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
546 return -EINVAL; 600 return -EINVAL;
547} 601}
548 602
549static inline struct device *iommu_device_create(struct device *parent, 603static inline int iommu_device_register(struct iommu_device *iommu)
550 void *drvdata, 604{
551 const struct attribute_group **groups, 605 return -ENODEV;
552 const char *fmt, ...) 606}
607
608static inline void iommu_device_set_ops(struct iommu_device *iommu,
609 const struct iommu_ops *ops)
610{
611}
612
613static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
614 struct fwnode_handle *fwnode)
615{
616}
617
618static inline void iommu_device_unregister(struct iommu_device *iommu)
553{ 619{
554 return ERR_PTR(-ENODEV);
555} 620}
556 621
557static inline void iommu_device_destroy(struct device *dev) 622static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
623 struct device *parent,
624 const struct attribute_group **groups,
625 const char *fmt, ...)
626{
627 return -ENODEV;
628}
629
630static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
558{ 631{
559} 632}
560 633
@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
584 return -ENODEV; 657 return -ENODEV;
585} 658}
586 659
587static inline void iommu_register_instance(struct fwnode_handle *fwnode,
588 const struct iommu_ops *ops)
589{
590}
591
592static inline 660static inline
593const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) 661const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
594{ 662{
595 return NULL; 663 return NULL;
596} 664}
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 39e3254e5769..f887351aa80e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -732,6 +732,10 @@ unsigned int arch_dynirq_lower_bound(unsigned int from);
732int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 732int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
733 struct module *owner, const struct cpumask *affinity); 733 struct module *owner, const struct cpumask *affinity);
734 734
735int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
736 unsigned int cnt, int node, struct module *owner,
737 const struct cpumask *affinity);
738
735/* use macros to avoid needing export.h for THIS_MODULE */ 739/* use macros to avoid needing export.h for THIS_MODULE */
736#define irq_alloc_descs(irq, from, cnt, node) \ 740#define irq_alloc_descs(irq, from, cnt, node) \
737 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) 741 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
@@ -748,6 +752,21 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
748#define irq_alloc_descs_from(from, cnt, node) \ 752#define irq_alloc_descs_from(from, cnt, node) \
749 irq_alloc_descs(-1, from, cnt, node) 753 irq_alloc_descs(-1, from, cnt, node)
750 754
755#define devm_irq_alloc_descs(dev, irq, from, cnt, node) \
756 __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
757
758#define devm_irq_alloc_desc(dev, node) \
759 devm_irq_alloc_descs(dev, -1, 0, 1, node)
760
761#define devm_irq_alloc_desc_at(dev, at, node) \
762 devm_irq_alloc_descs(dev, at, at, 1, node)
763
764#define devm_irq_alloc_desc_from(dev, from, node) \
765 devm_irq_alloc_descs(dev, -1, from, 1, node)
766
767#define devm_irq_alloc_descs_from(dev, from, cnt, node) \
768 devm_irq_alloc_descs(dev, -1, from, cnt, node)
769
751void irq_free_descs(unsigned int irq, unsigned int cnt); 770void irq_free_descs(unsigned int irq, unsigned int cnt);
752static inline void irq_free_desc(unsigned int irq) 771static inline void irq_free_desc(unsigned int irq)
753{ 772{
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index e808f8ae6f14..725e86b506f3 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,7 +73,6 @@
73 73
74#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) 74#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
75#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) 75#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
76#define GICD_TYPER_LPIS (1U << 17)
77 76
78#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) 77#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
79#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) 78#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -306,7 +305,7 @@
306#define GITS_BASER_TYPE_NONE 0 305#define GITS_BASER_TYPE_NONE 0
307#define GITS_BASER_TYPE_DEVICE 1 306#define GITS_BASER_TYPE_DEVICE 1
308#define GITS_BASER_TYPE_VCPU 2 307#define GITS_BASER_TYPE_VCPU 2
309#define GITS_BASER_TYPE_CPU 3 308#define GITS_BASER_TYPE_RESERVED3 3
310#define GITS_BASER_TYPE_COLLECTION 4 309#define GITS_BASER_TYPE_COLLECTION 4
311#define GITS_BASER_TYPE_RESERVED5 5 310#define GITS_BASER_TYPE_RESERVED5 5
312#define GITS_BASER_TYPE_RESERVED6 6 311#define GITS_BASER_TYPE_RESERVED6 6
@@ -320,8 +319,6 @@
320#define GITS_CMD_MAPD 0x08 319#define GITS_CMD_MAPD 0x08
321#define GITS_CMD_MAPC 0x09 320#define GITS_CMD_MAPC 0x09
322#define GITS_CMD_MAPTI 0x0a 321#define GITS_CMD_MAPTI 0x0a
323/* older GIC documentation used MAPVI for this command */
324#define GITS_CMD_MAPVI GITS_CMD_MAPTI
325#define GITS_CMD_MAPI 0x0b 322#define GITS_CMD_MAPI 0x0b
326#define GITS_CMD_MOVI 0x01 323#define GITS_CMD_MOVI 0x01
327#define GITS_CMD_DISCARD 0x0f 324#define GITS_CMD_DISCARD 0x0f
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ffb84604c1de..188eced6813e 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -183,6 +183,12 @@ enum {
183 /* Irq domain is an IPI domain with single virq */ 183 /* Irq domain is an IPI domain with single virq */
184 IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), 184 IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
185 185
186 /* Irq domain implements MSIs */
187 IRQ_DOMAIN_FLAG_MSI = (1 << 4),
188
189 /* Irq domain implements MSI remapping */
190 IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
191
186 /* 192 /*
187 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved 193 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
188 * for implementation specific purposes and ignored by the 194 * for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
216 void *host_data); 222 void *host_data);
217extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, 223extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
218 enum irq_domain_bus_token bus_token); 224 enum irq_domain_bus_token bus_token);
225extern bool irq_domain_check_msi_remap(void);
219extern void irq_set_default_host(struct irq_domain *host); 226extern void irq_set_default_host(struct irq_domain *host);
220extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, 227extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
221 irq_hw_number_t hwirq, int node, 228 irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
446{ 453{
447 return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; 454 return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
448} 455}
456
457static inline bool irq_domain_is_msi(struct irq_domain *domain)
458{
459 return domain->flags & IRQ_DOMAIN_FLAG_MSI;
460}
461
462static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
463{
464 return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
465}
466
467extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
468
449#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 469#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
450static inline void irq_domain_activate_irq(struct irq_data *data) { } 470static inline void irq_domain_activate_irq(struct irq_data *data) { }
451static inline void irq_domain_deactivate_irq(struct irq_data *data) { } 471static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
477{ 497{
478 return false; 498 return false;
479} 499}
500
501static inline bool irq_domain_is_msi(struct irq_domain *domain)
502{
503 return false;
504}
505
506static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
507{
508 return false;
509}
510
511static inline bool
512irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
513{
514 return false;
515}
480#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 516#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
481 517
482#else /* CONFIG_IRQ_DOMAIN */ 518#else /* CONFIG_IRQ_DOMAIN */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 589d14e970ad..624215cebee5 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -293,6 +293,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
293 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; 293 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
294} 294}
295 295
296extern u64 jiffies64_to_nsecs(u64 j);
297
296extern unsigned long __msecs_to_jiffies(const unsigned int m); 298extern unsigned long __msecs_to_jiffies(const unsigned int m);
297#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) 299#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
298/* 300/*
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a0547c571800..b63d6b7b0db0 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -402,6 +402,6 @@ extern bool ____wrong_branch_error(void);
402#define static_branch_enable(x) static_key_enable(&(x)->key) 402#define static_branch_enable(x) static_key_enable(&(x)->key)
403#define static_branch_disable(x) static_key_disable(&(x)->key) 403#define static_branch_disable(x) static_key_disable(&(x)->key)
404 404
405#endif /* _LINUX_JUMP_LABEL_H */
406
407#endif /* __ASSEMBLY__ */ 405#endif /* __ASSEMBLY__ */
406
407#endif /* _LINUX_JUMP_LABEL_H */
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 00f776816aa3..66be8b6beceb 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -9,7 +9,6 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/vtime.h> 10#include <linux/vtime.h>
11#include <asm/irq.h> 11#include <asm/irq.h>
12#include <linux/cputime.h>
13 12
14/* 13/*
15 * 'kernel_stat.h' contains the definitions needed for doing 14 * 'kernel_stat.h' contains the definitions needed for doing
@@ -78,15 +77,18 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
78 return kstat_cpu(cpu).irqs_sum; 77 return kstat_cpu(cpu).irqs_sum;
79} 78}
80 79
81extern void account_user_time(struct task_struct *, cputime_t); 80extern void account_user_time(struct task_struct *, u64);
82extern void account_system_time(struct task_struct *, int, cputime_t); 81extern void account_guest_time(struct task_struct *, u64);
83extern void account_steal_time(cputime_t); 82extern void account_system_time(struct task_struct *, int, u64);
84extern void account_idle_time(cputime_t); 83extern void account_system_index_time(struct task_struct *, u64,
84 enum cpu_usage_stat);
85extern void account_steal_time(u64);
86extern void account_idle_time(u64);
85 87
86#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 88#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
87static inline void account_process_tick(struct task_struct *tsk, int user) 89static inline void account_process_tick(struct task_struct *tsk, int user)
88{ 90{
89 vtime_account_user(tsk); 91 vtime_flush(tsk);
90} 92}
91#else 93#else
92extern void account_process_tick(struct task_struct *, int user); 94extern void account_process_tick(struct task_struct *, int user);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8f6849084248..16ddfb8b304a 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -278,9 +278,13 @@ struct kprobe_insn_cache {
278 int nr_garbage; 278 int nr_garbage;
279}; 279};
280 280
281#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
281extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c); 282extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
282extern void __free_insn_slot(struct kprobe_insn_cache *c, 283extern void __free_insn_slot(struct kprobe_insn_cache *c,
283 kprobe_opcode_t *slot, int dirty); 284 kprobe_opcode_t *slot, int dirty);
285/* sleep-less address checking routine */
286extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
287 unsigned long addr);
284 288
285#define DEFINE_INSN_CACHE_OPS(__name) \ 289#define DEFINE_INSN_CACHE_OPS(__name) \
286extern struct kprobe_insn_cache kprobe_##__name##_slots; \ 290extern struct kprobe_insn_cache kprobe_##__name##_slots; \
@@ -294,6 +298,18 @@ static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
294{ \ 298{ \
295 __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \ 299 __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \
296} \ 300} \
301 \
302static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
303{ \
304 return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \
305}
306#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
307#define DEFINE_INSN_CACHE_OPS(__name) \
308static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
309{ \
310 return 0; \
311}
312#endif
297 313
298DEFINE_INSN_CACHE_OPS(insn); 314DEFINE_INSN_CACHE_OPS(insn);
299 315
@@ -330,7 +346,6 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
330 int write, void __user *buffer, 346 int write, void __user *buffer,
331 size_t *length, loff_t *ppos); 347 size_t *length, loff_t *ppos);
332#endif 348#endif
333
334#endif /* CONFIG_OPTPROBES */ 349#endif /* CONFIG_OPTPROBES */
335#ifdef CONFIG_KPROBES_ON_FTRACE 350#ifdef CONFIG_KPROBES_ON_FTRACE
336extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 351extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
@@ -481,6 +496,19 @@ static inline int enable_jprobe(struct jprobe *jp)
481 return enable_kprobe(&jp->kp); 496 return enable_kprobe(&jp->kp);
482} 497}
483 498
499#ifndef CONFIG_KPROBES
500static inline bool is_kprobe_insn_slot(unsigned long addr)
501{
502 return false;
503}
504#endif
505#ifndef CONFIG_OPTPROBES
506static inline bool is_kprobe_optinsn_slot(unsigned long addr)
507{
508 return false;
509}
510#endif
511
484#ifdef CONFIG_KPROBES 512#ifdef CONFIG_KPROBES
485/* 513/*
486 * Blacklist ganerating macro. Specify functions which is not probed 514 * Blacklist ganerating macro. Specify functions which is not probed
diff --git a/include/linux/kref.h b/include/linux/kref.h
index e15828fd71f1..f4156f88f557 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -15,22 +15,27 @@
15#ifndef _KREF_H_ 15#ifndef _KREF_H_
16#define _KREF_H_ 16#define _KREF_H_
17 17
18#include <linux/bug.h> 18#include <linux/spinlock.h>
19#include <linux/atomic.h> 19#include <linux/refcount.h>
20#include <linux/kernel.h>
21#include <linux/mutex.h>
22 20
23struct kref { 21struct kref {
24 atomic_t refcount; 22 refcount_t refcount;
25}; 23};
26 24
25#define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
26
27/** 27/**
28 * kref_init - initialize object. 28 * kref_init - initialize object.
29 * @kref: object in question. 29 * @kref: object in question.
30 */ 30 */
31static inline void kref_init(struct kref *kref) 31static inline void kref_init(struct kref *kref)
32{ 32{
33 atomic_set(&kref->refcount, 1); 33 refcount_set(&kref->refcount, 1);
34}
35
36static inline unsigned int kref_read(const struct kref *kref)
37{
38 return refcount_read(&kref->refcount);
34} 39}
35 40
36/** 41/**
@@ -39,17 +44,12 @@ static inline void kref_init(struct kref *kref)
39 */ 44 */
40static inline void kref_get(struct kref *kref) 45static inline void kref_get(struct kref *kref)
41{ 46{
42 /* If refcount was 0 before incrementing then we have a race 47 refcount_inc(&kref->refcount);
43 * condition when this kref is freeing by some other thread right now.
44 * In this case one should use kref_get_unless_zero()
45 */
46 WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
47} 48}
48 49
49/** 50/**
50 * kref_sub - subtract a number of refcounts for object. 51 * kref_put - decrement refcount for object.
51 * @kref: object. 52 * @kref: object.
52 * @count: Number of recounts to subtract.
53 * @release: pointer to the function that will clean up the object when the 53 * @release: pointer to the function that will clean up the object when the
54 * last reference to the object is released. 54 * last reference to the object is released.
55 * This pointer is required, and it is not acceptable to pass kfree 55 * This pointer is required, and it is not acceptable to pass kfree
@@ -58,57 +58,43 @@ static inline void kref_get(struct kref *kref)
58 * maintainer, and anyone else who happens to notice it. You have 58 * maintainer, and anyone else who happens to notice it. You have
59 * been warned. 59 * been warned.
60 * 60 *
61 * Subtract @count from the refcount, and if 0, call release(). 61 * Decrement the refcount, and if 0, call release().
62 * Return 1 if the object was removed, otherwise return 0. Beware, if this 62 * Return 1 if the object was removed, otherwise return 0. Beware, if this
63 * function returns 0, you still can not count on the kref from remaining in 63 * function returns 0, you still can not count on the kref from remaining in
64 * memory. Only use the return value if you want to see if the kref is now 64 * memory. Only use the return value if you want to see if the kref is now
65 * gone, not present. 65 * gone, not present.
66 */ 66 */
67static inline int kref_sub(struct kref *kref, unsigned int count, 67static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
68 void (*release)(struct kref *kref))
69{ 68{
70 WARN_ON(release == NULL); 69 WARN_ON(release == NULL);
71 70
72 if (atomic_sub_and_test((int) count, &kref->refcount)) { 71 if (refcount_dec_and_test(&kref->refcount)) {
73 release(kref); 72 release(kref);
74 return 1; 73 return 1;
75 } 74 }
76 return 0; 75 return 0;
77} 76}
78 77
79/**
80 * kref_put - decrement refcount for object.
81 * @kref: object.
82 * @release: pointer to the function that will clean up the object when the
83 * last reference to the object is released.
84 * This pointer is required, and it is not acceptable to pass kfree
85 * in as this function. If the caller does pass kfree to this
86 * function, you will be publicly mocked mercilessly by the kref
87 * maintainer, and anyone else who happens to notice it. You have
88 * been warned.
89 *
90 * Decrement the refcount, and if 0, call release().
91 * Return 1 if the object was removed, otherwise return 0. Beware, if this
92 * function returns 0, you still can not count on the kref from remaining in
93 * memory. Only use the return value if you want to see if the kref is now
94 * gone, not present.
95 */
96static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
97{
98 return kref_sub(kref, 1, release);
99}
100
101static inline int kref_put_mutex(struct kref *kref, 78static inline int kref_put_mutex(struct kref *kref,
102 void (*release)(struct kref *kref), 79 void (*release)(struct kref *kref),
103 struct mutex *lock) 80 struct mutex *lock)
104{ 81{
105 WARN_ON(release == NULL); 82 WARN_ON(release == NULL);
106 if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) { 83
107 mutex_lock(lock); 84 if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
108 if (unlikely(!atomic_dec_and_test(&kref->refcount))) { 85 release(kref);
109 mutex_unlock(lock); 86 return 1;
110 return 0; 87 }
111 } 88 return 0;
89}
90
91static inline int kref_put_lock(struct kref *kref,
92 void (*release)(struct kref *kref),
93 spinlock_t *lock)
94{
95 WARN_ON(release == NULL);
96
97 if (refcount_dec_and_lock(&kref->refcount, lock)) {
112 release(kref); 98 release(kref);
113 return 1; 99 return 1;
114 } 100 }
@@ -133,6 +119,6 @@ static inline int kref_put_mutex(struct kref *kref,
133 */ 119 */
134static inline int __must_check kref_get_unless_zero(struct kref *kref) 120static inline int __must_check kref_get_unless_zero(struct kref *kref)
135{ 121{
136 return atomic_add_unless(&kref->refcount, 1, 0); 122 return refcount_inc_not_zero(&kref->refcount);
137} 123}
138#endif /* _KREF_H_ */ 124#endif /* _KREF_H_ */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 569cb531094c..38c0bd7ca107 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,6 +13,7 @@
13#define __LINUX_LEDS_H_INCLUDED 13#define __LINUX_LEDS_H_INCLUDED
14 14
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/kernfs.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/mutex.h> 18#include <linux/mutex.h>
18#include <linux/rwsem.h> 19#include <linux/rwsem.h>
@@ -27,6 +28,7 @@ struct device;
27 28
28enum led_brightness { 29enum led_brightness {
29 LED_OFF = 0, 30 LED_OFF = 0,
31 LED_ON = 1,
30 LED_HALF = 127, 32 LED_HALF = 127,
31 LED_FULL = 255, 33 LED_FULL = 255,
32}; 34};
@@ -46,6 +48,7 @@ struct led_classdev {
46#define LED_DEV_CAP_FLASH (1 << 18) 48#define LED_DEV_CAP_FLASH (1 << 18)
47#define LED_HW_PLUGGABLE (1 << 19) 49#define LED_HW_PLUGGABLE (1 << 19)
48#define LED_PANIC_INDICATOR (1 << 20) 50#define LED_PANIC_INDICATOR (1 << 20)
51#define LED_BRIGHT_HW_CHANGED (1 << 21)
49 52
50 /* set_brightness_work / blink_timer flags, atomic, private. */ 53 /* set_brightness_work / blink_timer flags, atomic, private. */
51 unsigned long work_flags; 54 unsigned long work_flags;
@@ -110,6 +113,11 @@ struct led_classdev {
110 bool activated; 113 bool activated;
111#endif 114#endif
112 115
116#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
117 int brightness_hw_changed;
118 struct kernfs_node *brightness_hw_changed_kn;
119#endif
120
113 /* Ensures consistent access to the LED Flash Class device */ 121 /* Ensures consistent access to the LED Flash Class device */
114 struct mutex led_access; 122 struct mutex led_access;
115}; 123};
@@ -422,4 +430,12 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
422} 430}
423#endif 431#endif
424 432
433#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
434extern void led_classdev_notify_brightness_hw_changed(
435 struct led_classdev *led_cdev, enum led_brightness brightness);
436#else
437static inline void led_classdev_notify_brightness_hw_changed(
438 struct led_classdev *led_cdev, enum led_brightness brightness) { }
439#endif
440
425#endif /* __LINUX_LEDS_H_INCLUDED */ 441#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fd4ca0b4fe0f..171baa90f6f6 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -3,28 +3,33 @@
3/* 3/*
4 * Lock-less NULL terminated single linked list 4 * Lock-less NULL terminated single linked list
5 * 5 *
6 * If there are multiple producers and multiple consumers, llist_add 6 * Cases where locking is not needed:
7 * can be used in producers and llist_del_all can be used in 7 * If there are multiple producers and multiple consumers, llist_add can be
8 * consumers. They can work simultaneously without lock. But 8 * used in producers and llist_del_all can be used in consumers simultaneously
9 * llist_del_first can not be used here. Because llist_del_first 9 * without locking. Also a single consumer can use llist_del_first while
10 * depends on list->first->next does not changed if list->first is not 10 * multiple producers simultaneously use llist_add, without any locking.
11 * changed during its operation, but llist_del_first, llist_add, 11 *
12 * llist_add (or llist_del_all, llist_add, llist_add) sequence in 12 * Cases where locking is needed:
13 * another consumer may violate that. 13 * If we have multiple consumers with llist_del_first used in one consumer, and
14 * 14 * llist_del_first or llist_del_all used in other consumers, then a lock is
15 * If there are multiple producers and one consumer, llist_add can be 15 * needed. This is because llist_del_first depends on list->first->next not
16 * used in producers and llist_del_all or llist_del_first can be used 16 * changing, but without lock protection, there's no way to be sure about that
17 * in the consumer. 17 * if a preemption happens in the middle of the delete operation and on being
18 * 18 * preempted back, the list->first is the same as before causing the cmpxchg in
19 * This can be summarized as follow: 19 * llist_del_first to succeed. For example, while a llist_del_first operation
20 * is in progress in one consumer, then a llist_del_first, llist_add,
21 * llist_add (or llist_del_all, llist_add, llist_add) sequence in another
22 * consumer may cause violations.
23 *
24 * This can be summarized as follows:
20 * 25 *
21 * | add | del_first | del_all 26 * | add | del_first | del_all
22 * add | - | - | - 27 * add | - | - | -
23 * del_first | | L | L 28 * del_first | | L | L
24 * del_all | | | - 29 * del_all | | | -
25 * 30 *
26 * Where "-" stands for no lock is needed, while "L" stands for lock 31 * Where, a particular row's operation can happen concurrently with a column's
27 * is needed. 32 * operation, with "-" being no lock needed, while "L" being lock is needed.
28 * 33 *
29 * The list entries deleted via llist_del_all can be traversed with 34 * The list entries deleted via llist_del_all can be traversed with
30 * traversing function such as llist_for_each etc. But the list 35 * traversing function such as llist_for_each etc. But the list
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6e8b5b270ffe..80690c96c734 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,6 +133,16 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
133 return ret; 133 return ret;
134} 134}
135 135
136#ifndef mul_u32_u32
137/*
138 * Many a GCC version messes this up and generates a 64x64 mult :-(
139 */
140static inline u64 mul_u32_u32(u32 a, u32 b)
141{
142 return (u64)a * b;
143}
144#endif
145
136#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) 146#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
137 147
138#ifndef mul_u64_u32_shr 148#ifndef mul_u64_u32_shr
@@ -160,9 +170,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
160 al = a; 170 al = a;
161 ah = a >> 32; 171 ah = a >> 32;
162 172
163 ret = ((u64)al * mul) >> shift; 173 ret = mul_u32_u32(al, mul) >> shift;
164 if (ah) 174 if (ah)
165 ret += ((u64)ah * mul) << (32 - shift); 175 ret += mul_u32_u32(ah, mul) << (32 - shift);
166 176
167 return ret; 177 return ret;
168} 178}
@@ -186,10 +196,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
186 a0.ll = a; 196 a0.ll = a;
187 b0.ll = b; 197 b0.ll = b;
188 198
189 rl.ll = (u64)a0.l.low * b0.l.low; 199 rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
190 rm.ll = (u64)a0.l.low * b0.l.high; 200 rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
191 rn.ll = (u64)a0.l.high * b0.l.low; 201 rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
192 rh.ll = (u64)a0.l.high * b0.l.high; 202 rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
193 203
194 /* 204 /*
195 * Each of these lines computes a 64-bit intermediate result into "c", 205 * Each of these lines computes a 64-bit intermediate result into "c",
@@ -229,8 +239,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
229 } u, rl, rh; 239 } u, rl, rh;
230 240
231 u.ll = a; 241 u.ll = a;
232 rl.ll = (u64)u.l.low * mul; 242 rl.ll = mul_u32_u32(u.l.low, mul);
233 rh.ll = (u64)u.l.high * mul + rl.l.high; 243 rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
234 244
235 /* Bits 32-63 of the result will be in rh.l.low. */ 245 /* Bits 32-63 of the result will be in rh.l.low. */
236 rl.l.high = do_div(rh.ll, divisor); 246 rl.l.high = do_div(rh.ll, divisor);
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index a4860bc9b73d..f848ee86a339 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -13,7 +13,7 @@
13 13
14#include <linux/regmap.h> 14#include <linux/regmap.h>
15 15
16enum { 16enum axp20x_variants {
17 AXP152_ID = 0, 17 AXP152_ID = 0,
18 AXP202_ID, 18 AXP202_ID,
19 AXP209_ID, 19 AXP209_ID,
@@ -532,35 +532,6 @@ struct axp20x_dev {
532 const struct regmap_irq_chip *regmap_irq_chip; 532 const struct regmap_irq_chip *regmap_irq_chip;
533}; 533};
534 534
535#define BATTID_LEN 64
536#define OCV_CURVE_SIZE 32
537#define MAX_THERM_CURVE_SIZE 25
538#define PD_DEF_MIN_TEMP 0
539#define PD_DEF_MAX_TEMP 55
540
541struct axp20x_fg_pdata {
542 char battid[BATTID_LEN + 1];
543 int design_cap;
544 int min_volt;
545 int max_volt;
546 int max_temp;
547 int min_temp;
548 int cap1;
549 int cap0;
550 int rdc1;
551 int rdc0;
552 int ocv_curve[OCV_CURVE_SIZE];
553 int tcsz;
554 int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
555};
556
557struct axp20x_chrg_pdata {
558 int max_cc;
559 int max_cv;
560 int def_cc;
561 int def_cv;
562};
563
564struct axp288_extcon_pdata { 535struct axp288_extcon_pdata {
565 /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */ 536 /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
566 struct gpio_desc *gpio_mux_cntl; 537 struct gpio_desc *gpio_mux_cntl;
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 2b300b44f994..fba8fcb54f8c 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,6 +20,8 @@
20#ifndef LPC_ICH_H 20#ifndef LPC_ICH_H
21#define LPC_ICH_H 21#define LPC_ICH_H
22 22
23#include <linux/platform_data/intel-spi.h>
24
23/* GPIO resources */ 25/* GPIO resources */
24#define ICH_RES_GPIO 0 26#define ICH_RES_GPIO 0
25#define ICH_RES_GPE0 1 27#define ICH_RES_GPE0 1
@@ -40,6 +42,7 @@ struct lpc_ich_info {
40 char name[32]; 42 char name[32];
41 unsigned int iTCO_version; 43 unsigned int iTCO_version;
42 unsigned int gpio_version; 44 unsigned int gpio_version;
45 enum intel_spi_type spi_type;
43 u8 use_gpio; 46 u8 use_gpio;
44}; 47};
45 48
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 0db320b7bb15..a83b84ff70e5 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -17,7 +17,13 @@ struct msi_desc;
17struct pci_dev; 17struct pci_dev;
18struct platform_msi_priv_data; 18struct platform_msi_priv_data;
19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
20#ifdef CONFIG_GENERIC_MSI_IRQ
20void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 21void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
22#else
23static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
24{
25}
26#endif
21 27
22typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, 28typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
23 struct msi_msg *msg); 29 struct msi_msg *msg);
@@ -116,11 +122,15 @@ struct msi_desc {
116 122
117struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); 123struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
118void *msi_desc_to_pci_sysdata(struct msi_desc *desc); 124void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
125void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
119#else /* CONFIG_PCI_MSI */ 126#else /* CONFIG_PCI_MSI */
120static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) 127static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
121{ 128{
122 return NULL; 129 return NULL;
123} 130}
131static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
132{
133}
124#endif /* CONFIG_PCI_MSI */ 134#endif /* CONFIG_PCI_MSI */
125 135
126struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, 136struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
@@ -128,7 +138,6 @@ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
128void free_msi_entry(struct msi_desc *entry); 138void free_msi_entry(struct msi_desc *entry);
129void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 139void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
130void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 140void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
131void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
132 141
133u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); 142u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
134u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 143u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
deleted file mode 100644
index ad3c3488073c..000000000000
--- a/include/linux/mtd/fsmc.h
+++ /dev/null
@@ -1,156 +0,0 @@
1/*
2 * incude/mtd/fsmc.h
3 *
4 * ST Microelectronics
5 * Flexible Static Memory Controller (FSMC)
6 * platform data interface and header file
7 *
8 * Copyright © 2010 ST Microelectronics
9 * Vipin Kumar <vipin.kumar@st.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#ifndef __MTD_FSMC_H
17#define __MTD_FSMC_H
18
19#include <linux/io.h>
20#include <linux/platform_device.h>
21#include <linux/mtd/physmap.h>
22#include <linux/types.h>
23#include <linux/mtd/partitions.h>
24#include <asm/param.h>
25
26#define FSMC_NAND_BW8 1
27#define FSMC_NAND_BW16 2
28
29#define FSMC_MAX_NOR_BANKS 4
30#define FSMC_MAX_NAND_BANKS 4
31
32#define FSMC_FLASH_WIDTH8 1
33#define FSMC_FLASH_WIDTH16 2
34
35/* fsmc controller registers for NOR flash */
36#define CTRL 0x0
37 /* ctrl register definitions */
38 #define BANK_ENABLE (1 << 0)
39 #define MUXED (1 << 1)
40 #define NOR_DEV (2 << 2)
41 #define WIDTH_8 (0 << 4)
42 #define WIDTH_16 (1 << 4)
43 #define RSTPWRDWN (1 << 6)
44 #define WPROT (1 << 7)
45 #define WRT_ENABLE (1 << 12)
46 #define WAIT_ENB (1 << 13)
47
48#define CTRL_TIM 0x4
49 /* ctrl_tim register definitions */
50
51#define FSMC_NOR_BANK_SZ 0x8
52#define FSMC_NOR_REG_SIZE 0x40
53
54#define FSMC_NOR_REG(base, bank, reg) (base + \
55 FSMC_NOR_BANK_SZ * (bank) + \
56 reg)
57
58/* fsmc controller registers for NAND flash */
59#define PC 0x00
60 /* pc register definitions */
61 #define FSMC_RESET (1 << 0)
62 #define FSMC_WAITON (1 << 1)
63 #define FSMC_ENABLE (1 << 2)
64 #define FSMC_DEVTYPE_NAND (1 << 3)
65 #define FSMC_DEVWID_8 (0 << 4)
66 #define FSMC_DEVWID_16 (1 << 4)
67 #define FSMC_ECCEN (1 << 6)
68 #define FSMC_ECCPLEN_512 (0 << 7)
69 #define FSMC_ECCPLEN_256 (1 << 7)
70 #define FSMC_TCLR_1 (1)
71 #define FSMC_TCLR_SHIFT (9)
72 #define FSMC_TCLR_MASK (0xF)
73 #define FSMC_TAR_1 (1)
74 #define FSMC_TAR_SHIFT (13)
75 #define FSMC_TAR_MASK (0xF)
76#define STS 0x04
77 /* sts register definitions */
78 #define FSMC_CODE_RDY (1 << 15)
79#define COMM 0x08
80 /* comm register definitions */
81 #define FSMC_TSET_0 0
82 #define FSMC_TSET_SHIFT 0
83 #define FSMC_TSET_MASK 0xFF
84 #define FSMC_TWAIT_6 6
85 #define FSMC_TWAIT_SHIFT 8
86 #define FSMC_TWAIT_MASK 0xFF
87 #define FSMC_THOLD_4 4
88 #define FSMC_THOLD_SHIFT 16
89 #define FSMC_THOLD_MASK 0xFF
90 #define FSMC_THIZ_1 1
91 #define FSMC_THIZ_SHIFT 24
92 #define FSMC_THIZ_MASK 0xFF
93#define ATTRIB 0x0C
94#define IOATA 0x10
95#define ECC1 0x14
96#define ECC2 0x18
97#define ECC3 0x1C
98#define FSMC_NAND_BANK_SZ 0x20
99
100#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
101 (FSMC_NAND_BANK_SZ * (bank)) + \
102 reg)
103
104#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
105
106struct fsmc_nand_timings {
107 uint8_t tclr;
108 uint8_t tar;
109 uint8_t thiz;
110 uint8_t thold;
111 uint8_t twait;
112 uint8_t tset;
113};
114
115enum access_mode {
116 USE_DMA_ACCESS = 1,
117 USE_WORD_ACCESS,
118};
119
120/**
121 * fsmc_nand_platform_data - platform specific NAND controller config
122 * @nand_timings: timing setup for the physical NAND interface
123 * @partitions: partition table for the platform, use a default fallback
124 * if this is NULL
125 * @nr_partitions: the number of partitions in the previous entry
126 * @options: different options for the driver
127 * @width: bus width
128 * @bank: default bank
129 * @select_bank: callback to select a certain bank, this is
130 * platform-specific. If the controller only supports one bank
131 * this may be set to NULL
132 */
133struct fsmc_nand_platform_data {
134 struct fsmc_nand_timings *nand_timings;
135 struct mtd_partition *partitions;
136 unsigned int nr_partitions;
137 unsigned int options;
138 unsigned int width;
139 unsigned int bank;
140
141 enum access_mode mode;
142
143 void (*select_bank)(uint32_t bank, uint32_t busw);
144
145 /* priv structures for dma accesses */
146 void *read_dma_priv;
147 void *write_dma_priv;
148};
149
150extern int __init fsmc_nor_init(struct platform_device *pdev,
151 unsigned long base, uint32_t bank, uint32_t width);
152extern void __init fsmc_init_board_info(struct platform_device *pdev,
153 struct mtd_partition *partitions, unsigned int nr_partitions,
154 unsigned int width);
155
156#endif /* __MTD_FSMC_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 13f8052b9ff9..eebdc63cf6af 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -24,6 +24,7 @@
24#include <linux/uio.h> 24#include <linux/uio.h>
25#include <linux/notifier.h> 25#include <linux/notifier.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/of.h>
27 28
28#include <mtd/mtd-abi.h> 29#include <mtd/mtd-abi.h>
29 30
@@ -322,6 +323,7 @@ struct mtd_info {
322 int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs); 323 int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
323 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); 324 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
324 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); 325 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
326 int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
325 int (*_suspend) (struct mtd_info *mtd); 327 int (*_suspend) (struct mtd_info *mtd);
326 void (*_resume) (struct mtd_info *mtd); 328 void (*_resume) (struct mtd_info *mtd);
327 void (*_reboot) (struct mtd_info *mtd); 329 void (*_reboot) (struct mtd_info *mtd);
@@ -385,6 +387,8 @@ static inline void mtd_set_of_node(struct mtd_info *mtd,
385 struct device_node *np) 387 struct device_node *np)
386{ 388{
387 mtd->dev.of_node = np; 389 mtd->dev.of_node = np;
390 if (!mtd->name)
391 of_property_read_string(np, "label", &mtd->name);
388} 392}
389 393
390static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) 394static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
@@ -397,6 +401,18 @@ static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
397 return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize; 401 return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
398} 402}
399 403
404static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
405 loff_t ofs, size_t len)
406{
407 if (!mtd->_max_bad_blocks)
408 return -ENOTSUPP;
409
410 if (mtd->size < (len + ofs) || ofs < 0)
411 return -EINVAL;
412
413 return mtd->_max_bad_blocks(mtd, ofs, len);
414}
415
400int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, 416int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
401 struct mtd_pairing_info *info); 417 struct mtd_pairing_info *info);
402int mtd_pairing_info_to_wunit(struct mtd_info *mtd, 418int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5f3a012ae62..9591e0fbe5bd 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -615,7 +615,7 @@ struct nand_buffers {
615 * @tALS_min: ALE setup time 615 * @tALS_min: ALE setup time
616 * @tAR_min: ALE to RE# delay 616 * @tAR_min: ALE to RE# delay
617 * @tCEA_max: CE# access time 617 * @tCEA_max: CE# access time
618 * @tCEH_min: 618 * @tCEH_min: CE# high hold time
619 * @tCH_min: CE# hold time 619 * @tCH_min: CE# hold time
620 * @tCHZ_max: CE# high to output hi-Z 620 * @tCHZ_max: CE# high to output hi-Z
621 * @tCLH_min: CLE hold time 621 * @tCLH_min: CLE hold time
@@ -801,6 +801,10 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
801 * supported, 0 otherwise. 801 * supported, 0 otherwise.
802 * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is 802 * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is
803 * supported, 0 otherwise. 803 * supported, 0 otherwise.
804 * @max_bb_per_die: [INTERN] the max number of bad blocks each die of a
805 * this nand device will encounter their life times.
806 * @blocks_per_die: [INTERN] The number of PEBs in a die
807 * @data_interface: [INTERN] NAND interface timing information
804 * @read_retries: [INTERN] the number of read retry modes supported 808 * @read_retries: [INTERN] the number of read retry modes supported
805 * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand 809 * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
806 * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand 810 * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
@@ -883,6 +887,8 @@ struct nand_chip {
883 struct nand_onfi_params onfi_params; 887 struct nand_onfi_params onfi_params;
884 struct nand_jedec_params jedec_params; 888 struct nand_jedec_params jedec_params;
885 }; 889 };
890 u16 max_bb_per_die;
891 u32 blocks_per_die;
886 892
887 struct nand_data_interface *data_interface; 893 struct nand_data_interface *data_interface;
888 894
@@ -958,6 +964,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
958#define NAND_MFR_SANDISK 0x45 964#define NAND_MFR_SANDISK 0x45
959#define NAND_MFR_INTEL 0x89 965#define NAND_MFR_INTEL 0x89
960#define NAND_MFR_ATO 0x9b 966#define NAND_MFR_ATO 0x9b
967#define NAND_MFR_WINBOND 0xef
961 968
962/* The maximum expected count of bytes in the NAND ID sequence */ 969/* The maximum expected count of bytes in the NAND ID sequence */
963#define NAND_MAX_ID_LEN 8 970#define NAND_MAX_ID_LEN 8
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 70736e1e6c8f..06df1e06b6e0 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -41,6 +41,7 @@ struct mtd_partition {
41 uint64_t size; /* partition size */ 41 uint64_t size; /* partition size */
42 uint64_t offset; /* offset within the master MTD space */ 42 uint64_t offset; /* offset within the master MTD space */
43 uint32_t mask_flags; /* master MTD flags to mask out for this partition */ 43 uint32_t mask_flags; /* master MTD flags to mask out for this partition */
44 struct device_node *of_node;
44}; 45};
45 46
46#define MTDPART_OFS_RETAIN (-3) 47#define MTDPART_OFS_RETAIN (-3)
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index c425c7b4c2a0..f2a718030476 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -43,9 +43,13 @@
43#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ 43#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
44#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ 44#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */
45#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ 45#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */
46#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual SPI) */ 46#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */
47#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad SPI) */ 47#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */
48#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */
49#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */
48#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ 50#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
51#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */
52#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */
49#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ 53#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */
50#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ 54#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
51#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ 55#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */
@@ -56,11 +60,17 @@
56#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ 60#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
57 61
58/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ 62/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
59#define SPINOR_OP_READ4 0x13 /* Read data bytes (low frequency) */ 63#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
60#define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */ 64#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */
61#define SPINOR_OP_READ4_1_1_2 0x3c /* Read data bytes (Dual SPI) */ 65#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */
62#define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */ 66#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */
67#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
68#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
63#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ 69#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
70#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */
71#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */
72#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */
73#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
64#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ 74#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
65 75
66/* Used for SST flashes only. */ 76/* Used for SST flashes only. */
@@ -68,6 +78,15 @@
68#define SPINOR_OP_WRDI 0x04 /* Write disable */ 78#define SPINOR_OP_WRDI 0x04 /* Write disable */
69#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ 79#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
70 80
81/* Used for S3AN flashes only */
82#define SPINOR_OP_XSE 0x50 /* Sector erase */
83#define SPINOR_OP_XPP 0x82 /* Page program */
84#define SPINOR_OP_XRDSR 0xd7 /* Read status register */
85
86#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
87#define XSR_RDY BIT(7) /* Ready */
88
89
71/* Used for Macronix and Winbond flashes. */ 90/* Used for Macronix and Winbond flashes. */
72#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ 91#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
73#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ 92#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
@@ -119,6 +138,9 @@ enum spi_nor_ops {
119enum spi_nor_option_flags { 138enum spi_nor_option_flags {
120 SNOR_F_USE_FSR = BIT(0), 139 SNOR_F_USE_FSR = BIT(0),
121 SNOR_F_HAS_SR_TB = BIT(1), 140 SNOR_F_HAS_SR_TB = BIT(1),
141 SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
142 SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
143 SNOR_F_READY_XSR_RDY = BIT(4),
122}; 144};
123 145
124/** 146/**
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b97870f2debd..1127fe31645d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,8 @@
20#include <linux/osq_lock.h> 20#include <linux/osq_lock.h>
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22 22
23struct ww_acquire_ctx;
24
23/* 25/*
24 * Simple, straightforward mutexes with strict semantics: 26 * Simple, straightforward mutexes with strict semantics:
25 * 27 *
@@ -65,7 +67,7 @@ struct mutex {
65 67
66static inline struct task_struct *__mutex_owner(struct mutex *lock) 68static inline struct task_struct *__mutex_owner(struct mutex *lock)
67{ 69{
68 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03); 70 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
69} 71}
70 72
71/* 73/*
@@ -75,6 +77,7 @@ static inline struct task_struct *__mutex_owner(struct mutex *lock)
75struct mutex_waiter { 77struct mutex_waiter {
76 struct list_head list; 78 struct list_head list;
77 struct task_struct *task; 79 struct task_struct *task;
80 struct ww_acquire_ctx *ww_ctx;
78#ifdef CONFIG_DEBUG_MUTEXES 81#ifdef CONFIG_DEBUG_MUTEXES
79 void *magic; 82 void *magic;
80#endif 83#endif
@@ -156,10 +159,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
156 unsigned int subclass); 159 unsigned int subclass);
157extern int __must_check mutex_lock_killable_nested(struct mutex *lock, 160extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
158 unsigned int subclass); 161 unsigned int subclass);
162extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
159 163
160#define mutex_lock(lock) mutex_lock_nested(lock, 0) 164#define mutex_lock(lock) mutex_lock_nested(lock, 0)
161#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) 165#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
162#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) 166#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
167#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
163 168
164#define mutex_lock_nest_lock(lock, nest_lock) \ 169#define mutex_lock_nest_lock(lock, nest_lock) \
165do { \ 170do { \
@@ -171,11 +176,13 @@ do { \
171extern void mutex_lock(struct mutex *lock); 176extern void mutex_lock(struct mutex *lock);
172extern int __must_check mutex_lock_interruptible(struct mutex *lock); 177extern int __must_check mutex_lock_interruptible(struct mutex *lock);
173extern int __must_check mutex_lock_killable(struct mutex *lock); 178extern int __must_check mutex_lock_killable(struct mutex *lock);
179extern void mutex_lock_io(struct mutex *lock);
174 180
175# define mutex_lock_nested(lock, subclass) mutex_lock(lock) 181# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
176# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) 182# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
177# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) 183# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
178# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) 184# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
185# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
179#endif 186#endif
180 187
181/* 188/*
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 6a7fc5051099..13394ac83c66 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
31 31
32#endif /* CONFIG_OF_IOMMU */ 32#endif /* CONFIG_OF_IOMMU */
33 33
34static inline void of_iommu_set_ops(struct device_node *np,
35 const struct iommu_ops *ops)
36{
37 iommu_register_instance(&np->fwnode, ops);
38}
39
40static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
41{
42 return iommu_get_instance(&np->fwnode);
43}
44
45extern struct of_device_id __iommu_of_table; 34extern struct of_device_id __iommu_of_table;
46 35
47typedef int (*of_iommu_init_fn)(struct device_node *); 36typedef int (*of_iommu_init_fn)(struct device_node *);
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 5b2e6159b744..93664f022ecf 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -4,15 +4,15 @@
4#include <linux/atomic.h> 4#include <linux/atomic.h>
5#include <linux/rwsem.h> 5#include <linux/rwsem.h>
6#include <linux/percpu.h> 6#include <linux/percpu.h>
7#include <linux/wait.h> 7#include <linux/rcuwait.h>
8#include <linux/rcu_sync.h> 8#include <linux/rcu_sync.h>
9#include <linux/lockdep.h> 9#include <linux/lockdep.h>
10 10
11struct percpu_rw_semaphore { 11struct percpu_rw_semaphore {
12 struct rcu_sync rss; 12 struct rcu_sync rss;
13 unsigned int __percpu *read_count; 13 unsigned int __percpu *read_count;
14 struct rw_semaphore rw_sem; 14 struct rw_semaphore rw_sem; /* slowpath */
15 wait_queue_head_t writer; 15 struct rcuwait writer; /* blocked writer */
16 int readers_block; 16 int readers_block;
17}; 17};
18 18
@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name = { \
22 .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ 22 .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
23 .read_count = &__percpu_rwsem_rc_##name, \ 23 .read_count = &__percpu_rwsem_rc_##name, \
24 .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ 24 .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
25 .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ 25 .writer = __RCUWAIT_INITIALIZER(name.writer), \
26} 26}
27 27
28extern int __percpu_down_read(struct percpu_rw_semaphore *, int); 28extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 78ed8105e64d..000fdb211c7d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -482,6 +482,7 @@ struct perf_addr_filter {
482 * @list: list of filters for this event 482 * @list: list of filters for this event
483 * @lock: spinlock that serializes accesses to the @list and event's 483 * @lock: spinlock that serializes accesses to the @list and event's
484 * (and its children's) filter generations. 484 * (and its children's) filter generations.
485 * @nr_file_filters: number of file-based filters
485 * 486 *
486 * A child event will use parent's @list (and therefore @lock), so they are 487 * A child event will use parent's @list (and therefore @lock), so they are
487 * bundled together; see perf_event_addr_filters(). 488 * bundled together; see perf_event_addr_filters().
@@ -489,6 +490,7 @@ struct perf_addr_filter {
489struct perf_addr_filters_head { 490struct perf_addr_filters_head {
490 struct list_head list; 491 struct list_head list;
491 raw_spinlock_t lock; 492 raw_spinlock_t lock;
493 unsigned int nr_file_filters;
492}; 494};
493 495
494/** 496/**
@@ -785,9 +787,9 @@ struct perf_cpu_context {
785 ktime_t hrtimer_interval; 787 ktime_t hrtimer_interval;
786 unsigned int hrtimer_active; 788 unsigned int hrtimer_active;
787 789
788 struct pmu *unique_pmu;
789#ifdef CONFIG_CGROUP_PERF 790#ifdef CONFIG_CGROUP_PERF
790 struct perf_cgroup *cgrp; 791 struct perf_cgroup *cgrp;
792 struct list_head cgrp_cpuctx_entry;
791#endif 793#endif
792 794
793 struct list_head sched_cb_entry; 795 struct list_head sched_cb_entry;
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
new file mode 100644
index 000000000000..942b0c3f8f08
--- /dev/null
+++ b/include/linux/platform_data/intel-spi.h
@@ -0,0 +1,31 @@
1/*
2 * Intel PCH/PCU SPI flash driver.
3 *
4 * Copyright (C) 2016, Intel Corporation
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef INTEL_SPI_PDATA_H
13#define INTEL_SPI_PDATA_H
14
15enum intel_spi_type {
16 INTEL_SPI_BYT = 1,
17 INTEL_SPI_LPT,
18 INTEL_SPI_BXT,
19};
20
21/**
22 * struct intel_spi_boardinfo - Board specific data for Intel SPI driver
23 * @type: Type which this controller is compatible with
24 * @writeable: The chip is writeable
25 */
26struct intel_spi_boardinfo {
27 enum intel_spi_type type;
28 bool writeable;
29};
30
31#endif /* INTEL_SPI_PDATA_H */
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
index 9bb63ac13f04..171a271c2cbd 100644
--- a/include/linux/platform_data/spi-ep93xx.h
+++ b/include/linux/platform_data/spi-ep93xx.h
@@ -5,25 +5,14 @@ struct spi_device;
5 5
6/** 6/**
7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor 7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor
8 * @num_chipselect: number of chip selects on this board, must be 8 * @chipselect: array of gpio numbers to use as chip selects
9 * at least one 9 * @num_chipselect: ARRAY_SIZE(chipselect)
10 * @use_dma: use DMA for the transfers 10 * @use_dma: use DMA for the transfers
11 */ 11 */
12struct ep93xx_spi_info { 12struct ep93xx_spi_info {
13 int *chipselect;
13 int num_chipselect; 14 int num_chipselect;
14 bool use_dma; 15 bool use_dma;
15}; 16};
16 17
17/**
18 * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device
19 * @setup: setup the chip select mechanism
20 * @cleanup: cleanup the chip select mechanism
21 * @cs_control: control the device chip select
22 */
23struct ep93xx_spi_chip_ops {
24 int (*setup)(struct spi_device *spi);
25 void (*cleanup)(struct spi_device *spi);
26 void (*cs_control)(struct spi_device *spi, int value);
27};
28
29#endif /* __ASM_MACH_EP93XX_SPI_H */ 18#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 81ece61075df..5339ed5bd6f9 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -182,6 +182,9 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
182{ 182{
183 return -ENOTSUPP; 183 return -ENOTSUPP;
184} 184}
185
186#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
187#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
185#endif 188#endif
186 189
187static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, 190static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0edd88f93904..a6685b3dde26 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -78,6 +78,9 @@ struct dev_pm_set_opp_data {
78 78
79#if defined(CONFIG_PM_OPP) 79#if defined(CONFIG_PM_OPP)
80 80
81struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
82void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
83
81unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); 84unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
82 85
83unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); 86unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
@@ -88,7 +91,7 @@ int dev_pm_opp_get_opp_count(struct device *dev);
88unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); 91unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
89unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev); 92unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
90unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev); 93unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
91struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev); 94unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
92 95
93struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 96struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
94 unsigned long freq, 97 unsigned long freq,
@@ -99,6 +102,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
99 102
100struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, 103struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
101 unsigned long *freq); 104 unsigned long *freq);
105void dev_pm_opp_put(struct dev_pm_opp *opp);
102 106
103int dev_pm_opp_add(struct device *dev, unsigned long freq, 107int dev_pm_opp_add(struct device *dev, unsigned long freq,
104 unsigned long u_volt); 108 unsigned long u_volt);
@@ -108,22 +112,30 @@ int dev_pm_opp_enable(struct device *dev, unsigned long freq);
108 112
109int dev_pm_opp_disable(struct device *dev, unsigned long freq); 113int dev_pm_opp_disable(struct device *dev, unsigned long freq);
110 114
111struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev); 115int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
112int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, 116int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
113 unsigned int count); 117
114void dev_pm_opp_put_supported_hw(struct device *dev); 118struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
115int dev_pm_opp_set_prop_name(struct device *dev, const char *name); 119void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
116void dev_pm_opp_put_prop_name(struct device *dev); 120struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
121void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
117struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); 122struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
118void dev_pm_opp_put_regulators(struct opp_table *opp_table); 123void dev_pm_opp_put_regulators(struct opp_table *opp_table);
119int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); 124struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
120void dev_pm_opp_register_put_opp_helper(struct device *dev); 125void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table);
121int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); 126int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
122int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); 127int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
123int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); 128int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
124void dev_pm_opp_remove_table(struct device *dev); 129void dev_pm_opp_remove_table(struct device *dev);
125void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); 130void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
126#else 131#else
132static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
133{
134 return ERR_PTR(-ENOTSUPP);
135}
136
137static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
138
127static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 139static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
128{ 140{
129 return 0; 141 return 0;
@@ -159,9 +171,9 @@ static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device
159 return 0; 171 return 0;
160} 172}
161 173
162static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) 174static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
163{ 175{
164 return NULL; 176 return 0;
165} 177}
166 178
167static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 179static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -182,6 +194,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
182 return ERR_PTR(-ENOTSUPP); 194 return ERR_PTR(-ENOTSUPP);
183} 195}
184 196
197static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
198
185static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, 199static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
186 unsigned long u_volt) 200 unsigned long u_volt)
187{ 201{
@@ -202,35 +216,39 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
202 return 0; 216 return 0;
203} 217}
204 218
205static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( 219static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
206 struct device *dev)
207{ 220{
208 return ERR_PTR(-ENOTSUPP); 221 return -ENOTSUPP;
209} 222}
210 223
211static inline int dev_pm_opp_set_supported_hw(struct device *dev, 224static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
212 const u32 *versions,
213 unsigned int count)
214{ 225{
215 return -ENOTSUPP; 226 return -ENOTSUPP;
216} 227}
217 228
218static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} 229static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
230 const u32 *versions,
231 unsigned int count)
232{
233 return ERR_PTR(-ENOTSUPP);
234}
219 235
220static inline int dev_pm_opp_register_set_opp_helper(struct device *dev, 236static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
237
238static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
221 int (*set_opp)(struct dev_pm_set_opp_data *data)) 239 int (*set_opp)(struct dev_pm_set_opp_data *data))
222{ 240{
223 return -ENOTSUPP; 241 return ERR_PTR(-ENOTSUPP);
224} 242}
225 243
226static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {} 244static inline void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table) {}
227 245
228static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) 246static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
229{ 247{
230 return -ENOTSUPP; 248 return ERR_PTR(-ENOTSUPP);
231} 249}
232 250
233static inline void dev_pm_opp_put_prop_name(struct device *dev) {} 251static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
234 252
235static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) 253static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
236{ 254{
@@ -270,6 +288,7 @@ void dev_pm_opp_of_remove_table(struct device *dev);
270int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); 288int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
271void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); 289void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
272int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); 290int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
291struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
273#else 292#else
274static inline int dev_pm_opp_of_add_table(struct device *dev) 293static inline int dev_pm_opp_of_add_table(struct device *dev)
275{ 294{
@@ -293,6 +312,11 @@ static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct
293{ 312{
294 return -ENOTSUPP; 313 return -ENOTSUPP;
295} 314}
315
316static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
317{
318 return NULL;
319}
296#endif 320#endif
297 321
298#endif /* __LINUX_OPP_H__ */ 322#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 0f65d36c2a75..3e2547d6e207 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -6,7 +6,6 @@
6 */ 6 */
7#include <linux/plist.h> 7#include <linux/plist.h>
8#include <linux/notifier.h> 8#include <linux/notifier.h>
9#include <linux/miscdevice.h>
10#include <linux/device.h> 9#include <linux/device.h>
11#include <linux/workqueue.h> 10#include <linux/workqueue.h>
12 11
@@ -147,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev,
147 struct notifier_block *notifier); 146 struct notifier_block *notifier);
148int dev_pm_qos_remove_notifier(struct device *dev, 147int dev_pm_qos_remove_notifier(struct device *dev,
149 struct notifier_block *notifier); 148 struct notifier_block *notifier);
150int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
151int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
152void dev_pm_qos_constraints_init(struct device *dev); 149void dev_pm_qos_constraints_init(struct device *dev);
153void dev_pm_qos_constraints_destroy(struct device *dev); 150void dev_pm_qos_constraints_destroy(struct device *dev);
154int dev_pm_qos_add_ancestor_request(struct device *dev, 151int dev_pm_qos_add_ancestor_request(struct device *dev,
@@ -200,12 +197,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev,
200static inline int dev_pm_qos_remove_notifier(struct device *dev, 197static inline int dev_pm_qos_remove_notifier(struct device *dev,
201 struct notifier_block *notifier) 198 struct notifier_block *notifier)
202 { return 0; } 199 { return 0; }
203static inline int dev_pm_qos_add_global_notifier(
204 struct notifier_block *notifier)
205 { return 0; }
206static inline int dev_pm_qos_remove_global_notifier(
207 struct notifier_block *notifier)
208 { return 0; }
209static inline void dev_pm_qos_constraints_init(struct device *dev) 200static inline void dev_pm_qos_constraints_init(struct device *dev)
210{ 201{
211 dev->power.power_state = PMSG_ON; 202 dev->power.power_state = PMSG_ON;
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 51334edec506..a39540326417 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -80,6 +80,7 @@
80/********** kernel/mutexes **********/ 80/********** kernel/mutexes **********/
81#define MUTEX_DEBUG_INIT 0x11 81#define MUTEX_DEBUG_INIT 0x11
82#define MUTEX_DEBUG_FREE 0x22 82#define MUTEX_DEBUG_FREE 0x22
83#define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA)
83 84
84/********** lib/flex_array.c **********/ 85/********** lib/flex_array.c **********/
85#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ 86#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 62d44c176071..64aa189efe21 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -8,19 +8,9 @@
8#include <linux/alarmtimer.h> 8#include <linux/alarmtimer.h>
9 9
10 10
11static inline unsigned long long cputime_to_expires(cputime_t expires)
12{
13 return (__force unsigned long long)expires;
14}
15
16static inline cputime_t expires_to_cputime(unsigned long long expires)
17{
18 return (__force cputime_t)expires;
19}
20
21struct cpu_timer_list { 11struct cpu_timer_list {
22 struct list_head entry; 12 struct list_head entry;
23 unsigned long long expires, incr; 13 u64 expires, incr;
24 struct task_struct *task; 14 struct task_struct *task;
25 int firing; 15 int firing;
26}; 16};
@@ -129,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task);
129void posix_cpu_timers_exit(struct task_struct *task); 119void posix_cpu_timers_exit(struct task_struct *task);
130void posix_cpu_timers_exit_group(struct task_struct *task); 120void posix_cpu_timers_exit_group(struct task_struct *task);
131void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, 121void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
132 cputime_t *newval, cputime_t *oldval); 122 u64 *newval, u64 *oldval);
133 123
134long clock_nanosleep_restart(struct restart_block *restart_block); 124long clock_nanosleep_restart(struct restart_block *restart_block);
135 125
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index bed9557b69e7..b312bcef53da 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -4,8 +4,16 @@
4enum bq27xxx_chip { 4enum bq27xxx_chip {
5 BQ27000 = 1, /* bq27000, bq27200 */ 5 BQ27000 = 1, /* bq27000, bq27200 */
6 BQ27010, /* bq27010, bq27210 */ 6 BQ27010, /* bq27010, bq27210 */
7 BQ27500, /* bq27500 */ 7 BQ2750X, /* bq27500 deprecated alias */
8 BQ27510, /* bq27510, bq27520 */ 8 BQ2751X, /* bq27510, bq27520 deprecated alias */
9 BQ27500, /* bq27500/1 */
10 BQ27510G1, /* bq27510G1 */
11 BQ27510G2, /* bq27510G2 */
12 BQ27510G3, /* bq27510G3 */
13 BQ27520G1, /* bq27520G1 */
14 BQ27520G2, /* bq27520G2 */
15 BQ27520G3, /* bq27520G3 */
16 BQ27520G4, /* bq27520G4 */
9 BQ27530, /* bq27530, bq27531 */ 17 BQ27530, /* bq27530, bq27531 */
10 BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ 18 BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
11 BQ27545, /* bq27545 */ 19 BQ27545, /* bq27545 */
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 2d6f0c39ed68..a0522328d7aa 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -90,9 +90,9 @@
90#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */ 90#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */
91 91
92#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ 92#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
93#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ 93#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
94#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ 94#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
95#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ 95#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
96 96
97#define RX_THRESH_CE4100_DFLT 2 97#define RX_THRESH_CE4100_DFLT 2
98#define TX_THRESH_CE4100_DFLT 2 98#define TX_THRESH_CE4100_DFLT 2
@@ -106,9 +106,9 @@
106#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ 106#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
107 107
108/* QUARK_X1000 SSCR0 bit definition */ 108/* QUARK_X1000 SSCR0 bit definition */
109#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ 109#define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */
110#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ 110#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
111#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ 111#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
112#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ 112#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
113 113
114#define RX_THRESH_QUARK_X1000_DFLT 1 114#define RX_THRESH_QUARK_X1000_DFLT 1
@@ -121,8 +121,8 @@
121#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ 121#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
122#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ 122#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
123#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ 123#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
124#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ 124#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
125#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ 125#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
126 126
127/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ 127/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
128#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ 128#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 01f71e1d2e94..6ade6a52d9d4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1161,5 +1161,17 @@ do { \
1161 ftrace_dump(oops_dump_mode); \ 1161 ftrace_dump(oops_dump_mode); \
1162} while (0) 1162} while (0)
1163 1163
1164/*
1165 * Place this after a lock-acquisition primitive to guarantee that
1166 * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies
1167 * if the UNLOCK and LOCK are executed by the same CPU or if the
1168 * UNLOCK and LOCK operate on the same lock variable.
1169 */
1170#ifdef CONFIG_PPC
1171#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
1172#else /* #ifdef CONFIG_PPC */
1173#define smp_mb__after_unlock_lock() do { } while (0)
1174#endif /* #else #ifdef CONFIG_PPC */
1175
1164 1176
1165#endif /* __LINUX_RCUPDATE_H */ 1177#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ac81e4063b40..4f9b2fa2173d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,6 +27,12 @@
27 27
28#include <linux/cache.h> 28#include <linux/cache.h>
29 29
30struct rcu_dynticks;
31static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
32{
33 return 0;
34}
35
30static inline unsigned long get_state_synchronize_rcu(void) 36static inline unsigned long get_state_synchronize_rcu(void)
31{ 37{
32 return 0; 38 return 0;
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
new file mode 100644
index 000000000000..a4ede51b3e7c
--- /dev/null
+++ b/include/linux/rcuwait.h
@@ -0,0 +1,63 @@
1#ifndef _LINUX_RCUWAIT_H_
2#define _LINUX_RCUWAIT_H_
3
4#include <linux/rcupdate.h>
5
6/*
7 * rcuwait provides a way of blocking and waking up a single
8 * task in an rcu-safe manner; where it is forbidden to use
9 * after exit_notify(). task_struct is not properly rcu protected,
10 * unless dealing with rcu-aware lists, ie: find_task_by_*().
11 *
12 * Alternatively we have task_rcu_dereference(), but the return
13 * semantics have different implications which would break the
14 * wakeup side. The only time @task is non-nil is when a user is
15 * blocked (or checking if it needs to) on a condition, and reset
16 * as soon as we know that the condition has succeeded and are
17 * awoken.
18 */
19struct rcuwait {
20 struct task_struct *task;
21};
22
23#define __RCUWAIT_INITIALIZER(name) \
24 { .task = NULL, }
25
26static inline void rcuwait_init(struct rcuwait *w)
27{
28 w->task = NULL;
29}
30
31extern void rcuwait_wake_up(struct rcuwait *w);
32
33/*
34 * The caller is responsible for locking around rcuwait_wait_event(),
35 * such that writes to @task are properly serialized.
36 */
37#define rcuwait_wait_event(w, condition) \
38({ \
39 /* \
40 * Complain if we are called after do_exit()/exit_notify(), \
41 * as we cannot rely on the rcu critical region for the \
42 * wakeup side. \
43 */ \
44 WARN_ON(current->exit_state); \
45 \
46 rcu_assign_pointer((w)->task, current); \
47 for (;;) { \
48 /* \
49 * Implicit barrier (A) pairs with (B) in \
50 * rcuwait_wake_up(). \
51 */ \
52 set_current_state(TASK_UNINTERRUPTIBLE); \
53 if (condition) \
54 break; \
55 \
56 schedule(); \
57 } \
58 \
59 WRITE_ONCE((w)->task, NULL); \
60 __set_current_state(TASK_RUNNING); \
61})
62
63#endif /* _LINUX_RCUWAIT_H_ */
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
new file mode 100644
index 000000000000..600aadf9cca4
--- /dev/null
+++ b/include/linux/refcount.h
@@ -0,0 +1,294 @@
1#ifndef _LINUX_REFCOUNT_H
2#define _LINUX_REFCOUNT_H
3
4/*
5 * Variant of atomic_t specialized for reference counts.
6 *
7 * The interface matches the atomic_t interface (to aid in porting) but only
8 * provides the few functions one should use for reference counting.
9 *
10 * It differs in that the counter saturates at UINT_MAX and will not move once
11 * there. This avoids wrapping the counter and causing 'spurious'
12 * use-after-free issues.
13 *
14 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
15 * and provide only what is strictly required for refcounts.
16 *
17 * The increments are fully relaxed; these will not provide ordering. The
18 * rationale is that whatever is used to obtain the object we're increasing the
19 * reference count on will provide the ordering. For locked data structures,
20 * its the lock acquire, for RCU/lockless data structures its the dependent
21 * load.
22 *
23 * Do note that inc_not_zero() provides a control dependency which will order
24 * future stores against the inc, this ensures we'll never modify the object
25 * if we did not in fact acquire a reference.
26 *
27 * The decrements will provide release order, such that all the prior loads and
28 * stores will be issued before, it also provides a control dependency, which
29 * will order us against the subsequent free().
30 *
31 * The control dependency is against the load of the cmpxchg (ll/sc) that
32 * succeeded. This means the stores aren't fully ordered, but this is fine
33 * because the 1->0 transition indicates no concurrency.
34 *
35 * Note that the allocator is responsible for ordering things between free()
36 * and alloc().
37 *
38 */
39
40#include <linux/atomic.h>
41#include <linux/bug.h>
42#include <linux/mutex.h>
43#include <linux/spinlock.h>
44
/*
 * With CONFIG_DEBUG_REFCOUNT the sanity checks below become real
 * WARN_ON()s and the checked helpers gain __must_check; otherwise the
 * condition is still evaluated (for its side effects) but not reported.
 */
#ifdef CONFIG_DEBUG_REFCOUNT
#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
#define __refcount_check	__must_check
#else
#define REFCOUNT_WARN(cond, str) (void)(cond)
#define __refcount_check
#endif

/* Saturating reference counter; see the comment at the top of this file. */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

/* Static initializer for a refcount_t holding @n references. */
#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
58
59static inline void refcount_set(refcount_t *r, unsigned int n)
60{
61 atomic_set(&r->refs, n);
62}
63
64static inline unsigned int refcount_read(const refcount_t *r)
65{
66 return atomic_read(&r->refs);
67}
68
/*
 * refcount_add_not_zero - add @i to the count unless it is zero.
 *
 * Returns false if the count was zero (object on its way to being
 * freed), true otherwise.  Saturates at UINT_MAX instead of wrapping;
 * once saturated the counter never moves again.  The cmpxchg is
 * relaxed: the caller is assumed to already hold a reference that
 * keeps the object stable.
 */
static inline __refcount_check
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		/* Zero means the object is already dead; refuse. */
		if (!val)
			return false;

		/* Saturated counters are frozen in place. */
		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		/* Clamp on overflow rather than wrapping around. */
		if (new < val)
			new = UINT_MAX;
		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost a race with a concurrent update; retry. */
		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
95
96static inline void refcount_add(unsigned int i, refcount_t *r)
97{
98 REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
99}
100
/*
 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
static inline __refcount_check
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		/* Zero: the object is already being torn down. */
		if (!val)
			return false;

		/* new == 0 means val was UINT_MAX: stay saturated. */
		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost a race with a concurrent update; retry. */
		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
133
134/*
135 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
136 *
137 * Provides no memory ordering, it is assumed the caller already has a
138 * reference on the object, will WARN when this is not so.
139 */
140static inline void refcount_inc(refcount_t *r)
141{
142 REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
143}
144
/*
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		/* Saturated counters are never decremented again. */
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		/* Unsigned wrap-around means we dropped below zero. */
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost a race with a concurrent update; retry. */
		val = old;
	}

	/* True iff this call dropped the count to zero. */
	return !new;
}
177
178static inline __refcount_check
179bool refcount_dec_and_test(refcount_t *r)
180{
181 return refcount_sub_and_test(1, r);
182}
183
184/*
185 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
186 * when saturated at UINT_MAX.
187 *
188 * Provides release memory ordering, such that prior loads and stores are done
189 * before.
190 */
191static inline
192void refcount_dec(refcount_t *r)
193{
194 REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
195}
196
197/*
198 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
199 * success thereof.
200 *
201 * Like all decrement operations, it provides release memory order and provides
202 * a control dependency.
203 *
204 * It can be used like a try-delete operator; this explicit case is provided
205 * and not cmpxchg in generic, because that would allow implementing unsafe
206 * operations.
207 */
208static inline __refcount_check
209bool refcount_dec_if_one(refcount_t *r)
210{
211 return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
212}
213
/*
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 */
static inline __refcount_check
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		/* Saturated: report success but never move the count. */
		if (unlikely(val == UINT_MAX))
			return true;

		/* Refuse to drop the last reference. */
		if (val == 1)
			return false;

		new = val - 1;
		/* Unsigned wrap-around: the count was 0, caller underflowed. */
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost a race with a concurrent update; retry. */
		val = old;
	}

	return true;
}
247
248/*
249 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
250 * to decrement when saturated at UINT_MAX.
251 *
252 * Provides release memory ordering, such that prior loads and stores are done
253 * before, and provides a control dependency such that free() must come after.
254 * See the comment on top.
255 */
256static inline __refcount_check
257bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
258{
259 if (refcount_dec_not_one(r))
260 return false;
261
262 mutex_lock(lock);
263 if (!refcount_dec_and_test(r)) {
264 mutex_unlock(lock);
265 return false;
266 }
267
268 return true;
269}
270
271/*
272 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
273 * decrement when saturated at UINT_MAX.
274 *
275 * Provides release memory ordering, such that prior loads and stores are done
276 * before, and provides a control dependency such that free() must come after.
277 * See the comment on top.
278 */
279static inline __refcount_check
280bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
281{
282 if (refcount_dec_not_one(r))
283 return false;
284
285 spin_lock(lock);
286 if (!refcount_dec_and_test(r)) {
287 spin_unlock(lock);
288 return false;
289 }
290
291 return true;
292}
293
294#endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f6673132431d..e88649225a60 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -40,12 +40,13 @@ enum regcache_type {
40}; 40};
41 41
42/** 42/**
43 * Default value for a register. We use an array of structs rather 43 * struct reg_default - Default value for a register.
44 * than a simple array as many modern devices have very sparse
45 * register maps.
46 * 44 *
47 * @reg: Register address. 45 * @reg: Register address.
48 * @def: Register default value. 46 * @def: Register default value.
47 *
48 * We use an array of structs rather than a simple array as many modern devices
49 * have very sparse register maps.
49 */ 50 */
50struct reg_default { 51struct reg_default {
51 unsigned int reg; 52 unsigned int reg;
@@ -53,12 +54,14 @@ struct reg_default {
53}; 54};
54 55
55/** 56/**
56 * Register/value pairs for sequences of writes with an optional delay in 57 * struct reg_sequence - An individual write from a sequence of writes.
57 * microseconds to be applied after each write.
58 * 58 *
59 * @reg: Register address. 59 * @reg: Register address.
60 * @def: Register value. 60 * @def: Register value.
61 * @delay_us: Delay to be applied after the register write in microseconds 61 * @delay_us: Delay to be applied after the register write in microseconds
62 *
63 * Register/value pairs for sequences of writes with an optional delay in
64 * microseconds to be applied after each write.
62 */ 65 */
63struct reg_sequence { 66struct reg_sequence {
64 unsigned int reg; 67 unsigned int reg;
@@ -98,6 +101,7 @@ struct reg_sequence {
98 101
99/** 102/**
100 * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs 103 * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
104 *
101 * @map: Regmap to read from 105 * @map: Regmap to read from
102 * @addr: Address to poll 106 * @addr: Address to poll
103 * @val: Unsigned integer variable to read the value into 107 * @val: Unsigned integer variable to read the value into
@@ -146,8 +150,8 @@ enum regmap_endian {
146}; 150};
147 151
148/** 152/**
149 * A register range, used for access related checks 153 * struct regmap_range - A register range, used for access related checks
150 * (readable/writeable/volatile/precious checks) 154 * (readable/writeable/volatile/precious checks)
151 * 155 *
152 * @range_min: address of first register 156 * @range_min: address of first register
153 * @range_max: address of last register 157 * @range_max: address of last register
@@ -159,16 +163,18 @@ struct regmap_range {
159 163
160#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, } 164#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
161 165
162/* 166/**
163 * A table of ranges including some yes ranges and some no ranges. 167 * struct regmap_access_table - A table of register ranges for access checks
164 * If a register belongs to a no_range, the corresponding check function
165 * will return false. If a register belongs to a yes range, the corresponding
166 * check function will return true. "no_ranges" are searched first.
167 * 168 *
168 * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges" 169 * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges"
169 * @n_yes_ranges: size of the above array 170 * @n_yes_ranges: size of the above array
170 * @no_ranges: pointer to an array of regmap ranges used as "no ranges" 171 * @no_ranges: pointer to an array of regmap ranges used as "no ranges"
171 * @n_no_ranges: size of the above array 172 * @n_no_ranges: size of the above array
173 *
174 * A table of ranges including some yes ranges and some no ranges.
175 * If a register belongs to a no_range, the corresponding check function
176 * will return false. If a register belongs to a yes range, the corresponding
177 * check function will return true. "no_ranges" are searched first.
172 */ 178 */
173struct regmap_access_table { 179struct regmap_access_table {
174 const struct regmap_range *yes_ranges; 180 const struct regmap_range *yes_ranges;
@@ -181,7 +187,7 @@ typedef void (*regmap_lock)(void *);
181typedef void (*regmap_unlock)(void *); 187typedef void (*regmap_unlock)(void *);
182 188
183/** 189/**
184 * Configuration for the register map of a device. 190 * struct regmap_config - Configuration for the register map of a device.
185 * 191 *
186 * @name: Optional name of the regmap. Useful when a device has multiple 192 * @name: Optional name of the regmap. Useful when a device has multiple
187 * register regions. 193 * register regions.
@@ -314,22 +320,24 @@ struct regmap_config {
314}; 320};
315 321
316/** 322/**
317 * Configuration for indirectly accessed or paged registers. 323 * struct regmap_range_cfg - Configuration for indirectly accessed or paged
318 * Registers, mapped to this virtual range, are accessed in two steps: 324 * registers.
319 * 1. page selector register update;
320 * 2. access through data window registers.
321 * 325 *
322 * @name: Descriptive name for diagnostics 326 * @name: Descriptive name for diagnostics
323 * 327 *
324 * @range_min: Address of the lowest register address in virtual range. 328 * @range_min: Address of the lowest register address in virtual range.
325 * @range_max: Address of the highest register in virtual range. 329 * @range_max: Address of the highest register in virtual range.
326 * 330 *
327 * @page_sel_reg: Register with selector field. 331 * @selector_reg: Register with selector field.
 328 * @page_sel_mask: Bit mask for selector value. 332 * @selector_mask: Bit mask for selector value.
 329 * @page_sel_shift: Bit shift for selector value. 333 * @selector_shift: Bit shift for selector value.
330 * 334 *
331 * @window_start: Address of first (lowest) register in data window. 335 * @window_start: Address of first (lowest) register in data window.
332 * @window_len: Number of registers in data window. 336 * @window_len: Number of registers in data window.
337 *
338 * Registers, mapped to this virtual range, are accessed in two steps:
339 * 1. page selector register update;
340 * 2. access through data window registers.
333 */ 341 */
334struct regmap_range_cfg { 342struct regmap_range_cfg {
335 const char *name; 343 const char *name;
@@ -372,7 +380,8 @@ typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
372typedef void (*regmap_hw_free_context)(void *context); 380typedef void (*regmap_hw_free_context)(void *context);
373 381
374/** 382/**
375 * Description of a hardware bus for the register map infrastructure. 383 * struct regmap_bus - Description of a hardware bus for the register map
384 * infrastructure.
376 * 385 *
377 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex 386 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
378 * to perform locking. This field is ignored if custom lock/unlock 387 * to perform locking. This field is ignored if custom lock/unlock
@@ -385,6 +394,10 @@ typedef void (*regmap_hw_free_context)(void *context);
385 * must serialise with respect to non-async I/O. 394 * must serialise with respect to non-async I/O.
386 * @reg_write: Write a single register value to the given register address. This 395 * @reg_write: Write a single register value to the given register address. This
387 * write operation has to complete when returning from the function. 396 * write operation has to complete when returning from the function.
397 * @reg_update_bits: Update bits operation to be used against volatile
398 * registers, intended for devices supporting some mechanism
 399 * for setting and clearing bits without having to
400 * read/modify/write.
388 * @read: Read operation. Data is returned in the buffer used to transmit 401 * @read: Read operation. Data is returned in the buffer used to transmit
389 * data. 402 * data.
390 * @reg_read: Read a single register value from a given register address. 403 * @reg_read: Read a single register value from a given register address.
@@ -514,7 +527,7 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
514#endif 527#endif
515 528
516/** 529/**
517 * regmap_init(): Initialise register map 530 * regmap_init() - Initialise register map
518 * 531 *
519 * @dev: Device that will be interacted with 532 * @dev: Device that will be interacted with
520 * @bus: Bus-specific callbacks to use with device 533 * @bus: Bus-specific callbacks to use with device
@@ -532,7 +545,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
532 const struct regmap_config *config); 545 const struct regmap_config *config);
533 546
534/** 547/**
535 * regmap_init_i2c(): Initialise register map 548 * regmap_init_i2c() - Initialise register map
536 * 549 *
537 * @i2c: Device that will be interacted with 550 * @i2c: Device that will be interacted with
538 * @config: Configuration for register map 551 * @config: Configuration for register map
@@ -545,9 +558,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
545 i2c, config) 558 i2c, config)
546 559
547/** 560/**
548 * regmap_init_spi(): Initialise register map 561 * regmap_init_spi() - Initialise register map
549 * 562 *
550 * @spi: Device that will be interacted with 563 * @dev: Device that will be interacted with
551 * @config: Configuration for register map 564 * @config: Configuration for register map
552 * 565 *
553 * The return value will be an ERR_PTR() on error or a valid pointer to 566 * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -558,8 +571,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
558 dev, config) 571 dev, config)
559 572
560/** 573/**
561 * regmap_init_spmi_base(): Create regmap for the Base register space 574 * regmap_init_spmi_base() - Create regmap for the Base register space
562 * @sdev: SPMI device that will be interacted with 575 *
576 * @dev: SPMI device that will be interacted with
563 * @config: Configuration for register map 577 * @config: Configuration for register map
564 * 578 *
565 * The return value will be an ERR_PTR() on error or a valid pointer to 579 * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -570,8 +584,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
570 dev, config) 584 dev, config)
571 585
572/** 586/**
573 * regmap_init_spmi_ext(): Create regmap for Ext register space 587 * regmap_init_spmi_ext() - Create regmap for Ext register space
574 * @sdev: Device that will be interacted with 588 *
589 * @dev: Device that will be interacted with
575 * @config: Configuration for register map 590 * @config: Configuration for register map
576 * 591 *
577 * The return value will be an ERR_PTR() on error or a valid pointer to 592 * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -582,7 +597,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
582 dev, config) 597 dev, config)
583 598
584/** 599/**
585 * regmap_init_mmio_clk(): Initialise register map with register clock 600 * regmap_init_mmio_clk() - Initialise register map with register clock
586 * 601 *
587 * @dev: Device that will be interacted with 602 * @dev: Device that will be interacted with
588 * @clk_id: register clock consumer ID 603 * @clk_id: register clock consumer ID
@@ -597,7 +612,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
597 dev, clk_id, regs, config) 612 dev, clk_id, regs, config)
598 613
599/** 614/**
600 * regmap_init_mmio(): Initialise register map 615 * regmap_init_mmio() - Initialise register map
601 * 616 *
602 * @dev: Device that will be interacted with 617 * @dev: Device that will be interacted with
603 * @regs: Pointer to memory-mapped IO region 618 * @regs: Pointer to memory-mapped IO region
@@ -610,7 +625,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
610 regmap_init_mmio_clk(dev, NULL, regs, config) 625 regmap_init_mmio_clk(dev, NULL, regs, config)
611 626
612/** 627/**
613 * regmap_init_ac97(): Initialise AC'97 register map 628 * regmap_init_ac97() - Initialise AC'97 register map
614 * 629 *
615 * @ac97: Device that will be interacted with 630 * @ac97: Device that will be interacted with
616 * @config: Configuration for register map 631 * @config: Configuration for register map
@@ -624,7 +639,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
624bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); 639bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
625 640
626/** 641/**
627 * devm_regmap_init(): Initialise managed register map 642 * devm_regmap_init() - Initialise managed register map
628 * 643 *
629 * @dev: Device that will be interacted with 644 * @dev: Device that will be interacted with
630 * @bus: Bus-specific callbacks to use with device 645 * @bus: Bus-specific callbacks to use with device
@@ -641,7 +656,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
641 dev, bus, bus_context, config) 656 dev, bus, bus_context, config)
642 657
643/** 658/**
644 * devm_regmap_init_i2c(): Initialise managed register map 659 * devm_regmap_init_i2c() - Initialise managed register map
645 * 660 *
646 * @i2c: Device that will be interacted with 661 * @i2c: Device that will be interacted with
647 * @config: Configuration for register map 662 * @config: Configuration for register map
@@ -655,9 +670,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
655 i2c, config) 670 i2c, config)
656 671
657/** 672/**
658 * devm_regmap_init_spi(): Initialise register map 673 * devm_regmap_init_spi() - Initialise register map
659 * 674 *
660 * @spi: Device that will be interacted with 675 * @dev: Device that will be interacted with
661 * @config: Configuration for register map 676 * @config: Configuration for register map
662 * 677 *
663 * The return value will be an ERR_PTR() on error or a valid pointer 678 * The return value will be an ERR_PTR() on error or a valid pointer
@@ -669,8 +684,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
669 dev, config) 684 dev, config)
670 685
671/** 686/**
672 * devm_regmap_init_spmi_base(): Create managed regmap for Base register space 687 * devm_regmap_init_spmi_base() - Create managed regmap for Base register space
673 * @sdev: SPMI device that will be interacted with 688 *
689 * @dev: SPMI device that will be interacted with
674 * @config: Configuration for register map 690 * @config: Configuration for register map
675 * 691 *
676 * The return value will be an ERR_PTR() on error or a valid pointer 692 * The return value will be an ERR_PTR() on error or a valid pointer
@@ -682,8 +698,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
682 dev, config) 698 dev, config)
683 699
684/** 700/**
685 * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space 701 * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space
686 * @sdev: SPMI device that will be interacted with 702 *
703 * @dev: SPMI device that will be interacted with
687 * @config: Configuration for register map 704 * @config: Configuration for register map
688 * 705 *
689 * The return value will be an ERR_PTR() on error or a valid pointer 706 * The return value will be an ERR_PTR() on error or a valid pointer
@@ -695,7 +712,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
695 dev, config) 712 dev, config)
696 713
697/** 714/**
698 * devm_regmap_init_mmio_clk(): Initialise managed register map with clock 715 * devm_regmap_init_mmio_clk() - Initialise managed register map with clock
699 * 716 *
700 * @dev: Device that will be interacted with 717 * @dev: Device that will be interacted with
701 * @clk_id: register clock consumer ID 718 * @clk_id: register clock consumer ID
@@ -711,7 +728,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
711 dev, clk_id, regs, config) 728 dev, clk_id, regs, config)
712 729
713/** 730/**
714 * devm_regmap_init_mmio(): Initialise managed register map 731 * devm_regmap_init_mmio() - Initialise managed register map
715 * 732 *
716 * @dev: Device that will be interacted with 733 * @dev: Device that will be interacted with
717 * @regs: Pointer to memory-mapped IO region 734 * @regs: Pointer to memory-mapped IO region
@@ -725,7 +742,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
725 devm_regmap_init_mmio_clk(dev, NULL, regs, config) 742 devm_regmap_init_mmio_clk(dev, NULL, regs, config)
726 743
727/** 744/**
728 * devm_regmap_init_ac97(): Initialise AC'97 register map 745 * devm_regmap_init_ac97() - Initialise AC'97 register map
729 * 746 *
730 * @ac97: Device that will be interacted with 747 * @ac97: Device that will be interacted with
731 * @config: Configuration for register map 748 * @config: Configuration for register map
@@ -800,7 +817,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
800 unsigned int nranges); 817 unsigned int nranges);
801 818
802/** 819/**
 803 * Description of a register field 820 * struct reg_field - Description of a register field
804 * 821 *
805 * @reg: Offset of the register within the regmap bank 822 * @reg: Offset of the register within the regmap bank
806 * @lsb: lsb of the register field. 823 * @lsb: lsb of the register field.
@@ -841,7 +858,7 @@ int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
841 bool *change, bool async, bool force); 858 bool *change, bool async, bool force);
842 859
843/** 860/**
844 * Description of an IRQ for the generic regmap irq_chip. 861 * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
845 * 862 *
846 * @reg_offset: Offset of the status/mask register within the bank 863 * @reg_offset: Offset of the status/mask register within the bank
847 * @mask: Mask used to flag/control the register. 864 * @mask: Mask used to flag/control the register.
@@ -861,9 +878,7 @@ struct regmap_irq {
861 [_irq] = { .reg_offset = (_off), .mask = (_mask) } 878 [_irq] = { .reg_offset = (_off), .mask = (_mask) }
862 879
863/** 880/**
864 * Description of a generic regmap irq_chip. This is not intended to 881 * struct regmap_irq_chip - Description of a generic regmap irq_chip.
865 * handle every possible interrupt controller, but it should handle a
866 * substantial proportion of those that are found in the wild.
867 * 882 *
868 * @name: Descriptive name for IRQ controller. 883 * @name: Descriptive name for IRQ controller.
869 * 884 *
@@ -897,6 +912,10 @@ struct regmap_irq {
897 * after handling the interrupts in regmap_irq_handler(). 912 * after handling the interrupts in regmap_irq_handler().
898 * @irq_drv_data: Driver specific IRQ data which is passed as parameter when 913 * @irq_drv_data: Driver specific IRQ data which is passed as parameter when
899 * driver specific pre/post interrupt handler is called. 914 * driver specific pre/post interrupt handler is called.
915 *
916 * This is not intended to handle every possible interrupt controller, but
917 * it should handle a substantial proportion of those that are found in the
918 * wild.
900 */ 919 */
901struct regmap_irq_chip { 920struct regmap_irq_chip {
902 const char *name; 921 const char *name;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ad3ec9ec61f7..c8e519d0b4a3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -29,7 +29,6 @@ struct sched_param {
29 29
30#include <asm/page.h> 30#include <asm/page.h>
31#include <asm/ptrace.h> 31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33 32
34#include <linux/smp.h> 33#include <linux/smp.h>
35#include <linux/sem.h> 34#include <linux/sem.h>
@@ -227,7 +226,7 @@ extern void proc_sched_set_task(struct task_struct *p);
227extern char ___assert_task_state[1 - 2*!!( 226extern char ___assert_task_state[1 - 2*!!(
228 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 227 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
229 228
230/* Convenience macros for the sake of set_task_state */ 229/* Convenience macros for the sake of set_current_state */
231#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 230#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
232#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) 231#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
233#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) 232#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
@@ -254,17 +253,6 @@ extern char ___assert_task_state[1 - 2*!!(
254 253
255#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 254#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
256 255
257#define __set_task_state(tsk, state_value) \
258 do { \
259 (tsk)->task_state_change = _THIS_IP_; \
260 (tsk)->state = (state_value); \
261 } while (0)
262#define set_task_state(tsk, state_value) \
263 do { \
264 (tsk)->task_state_change = _THIS_IP_; \
265 smp_store_mb((tsk)->state, (state_value)); \
266 } while (0)
267
268#define __set_current_state(state_value) \ 256#define __set_current_state(state_value) \
269 do { \ 257 do { \
270 current->task_state_change = _THIS_IP_; \ 258 current->task_state_change = _THIS_IP_; \
@@ -277,20 +265,6 @@ extern char ___assert_task_state[1 - 2*!!(
277 } while (0) 265 } while (0)
278 266
279#else 267#else
280
281/*
282 * @tsk had better be current, or you get to keep the pieces.
283 *
284 * The only reason is that computing current can be more expensive than
285 * using a pointer that's already available.
286 *
287 * Therefore, see set_current_state().
288 */
289#define __set_task_state(tsk, state_value) \
290 do { (tsk)->state = (state_value); } while (0)
291#define set_task_state(tsk, state_value) \
292 smp_store_mb((tsk)->state, (state_value))
293
294/* 268/*
295 * set_current_state() includes a barrier so that the write of current->state 269 * set_current_state() includes a barrier so that the write of current->state
296 * is correctly serialised wrt the caller's subsequent test of whether to 270 * is correctly serialised wrt the caller's subsequent test of whether to
@@ -461,12 +435,10 @@ extern signed long schedule_timeout_idle(signed long timeout);
461asmlinkage void schedule(void); 435asmlinkage void schedule(void);
462extern void schedule_preempt_disabled(void); 436extern void schedule_preempt_disabled(void);
463 437
438extern int __must_check io_schedule_prepare(void);
439extern void io_schedule_finish(int token);
464extern long io_schedule_timeout(long timeout); 440extern long io_schedule_timeout(long timeout);
465 441extern void io_schedule(void);
466static inline void io_schedule(void)
467{
468 io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
469}
470 442
471void __noreturn do_task_dead(void); 443void __noreturn do_task_dead(void);
472 444
@@ -565,15 +537,13 @@ struct pacct_struct {
565 int ac_flag; 537 int ac_flag;
566 long ac_exitcode; 538 long ac_exitcode;
567 unsigned long ac_mem; 539 unsigned long ac_mem;
568 cputime_t ac_utime, ac_stime; 540 u64 ac_utime, ac_stime;
569 unsigned long ac_minflt, ac_majflt; 541 unsigned long ac_minflt, ac_majflt;
570}; 542};
571 543
572struct cpu_itimer { 544struct cpu_itimer {
573 cputime_t expires; 545 u64 expires;
574 cputime_t incr; 546 u64 incr;
575 u32 error;
576 u32 incr_error;
577}; 547};
578 548
579/** 549/**
@@ -587,8 +557,8 @@ struct cpu_itimer {
587 */ 557 */
588struct prev_cputime { 558struct prev_cputime {
589#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 559#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
590 cputime_t utime; 560 u64 utime;
591 cputime_t stime; 561 u64 stime;
592 raw_spinlock_t lock; 562 raw_spinlock_t lock;
593#endif 563#endif
594}; 564};
@@ -603,8 +573,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
603 573
604/** 574/**
605 * struct task_cputime - collected CPU time counts 575 * struct task_cputime - collected CPU time counts
606 * @utime: time spent in user mode, in &cputime_t units 576 * @utime: time spent in user mode, in nanoseconds
607 * @stime: time spent in kernel mode, in &cputime_t units 577 * @stime: time spent in kernel mode, in nanoseconds
608 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds 578 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
609 * 579 *
610 * This structure groups together three kinds of CPU time that are tracked for 580 * This structure groups together three kinds of CPU time that are tracked for
@@ -612,8 +582,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
612 * these counts together and treat all three of them in parallel. 582 * these counts together and treat all three of them in parallel.
613 */ 583 */
614struct task_cputime { 584struct task_cputime {
615 cputime_t utime; 585 u64 utime;
616 cputime_t stime; 586 u64 stime;
617 unsigned long long sum_exec_runtime; 587 unsigned long long sum_exec_runtime;
618}; 588};
619 589
@@ -622,13 +592,6 @@ struct task_cputime {
622#define prof_exp stime 592#define prof_exp stime
623#define sched_exp sum_exec_runtime 593#define sched_exp sum_exec_runtime
624 594
625#define INIT_CPUTIME \
626 (struct task_cputime) { \
627 .utime = 0, \
628 .stime = 0, \
629 .sum_exec_runtime = 0, \
630 }
631
632/* 595/*
633 * This is the atomic variant of task_cputime, which can be used for 596 * This is the atomic variant of task_cputime, which can be used for
634 * storing and updating task_cputime statistics without locking. 597 * storing and updating task_cputime statistics without locking.
@@ -734,13 +697,14 @@ struct signal_struct {
734 unsigned int is_child_subreaper:1; 697 unsigned int is_child_subreaper:1;
735 unsigned int has_child_subreaper:1; 698 unsigned int has_child_subreaper:1;
736 699
700#ifdef CONFIG_POSIX_TIMERS
701
737 /* POSIX.1b Interval Timers */ 702 /* POSIX.1b Interval Timers */
738 int posix_timer_id; 703 int posix_timer_id;
739 struct list_head posix_timers; 704 struct list_head posix_timers;
740 705
741 /* ITIMER_REAL timer for the process */ 706 /* ITIMER_REAL timer for the process */
742 struct hrtimer real_timer; 707 struct hrtimer real_timer;
743 struct pid *leader_pid;
744 ktime_t it_real_incr; 708 ktime_t it_real_incr;
745 709
746 /* 710 /*
@@ -759,12 +723,16 @@ struct signal_struct {
759 /* Earliest-expiration cache. */ 723 /* Earliest-expiration cache. */
760 struct task_cputime cputime_expires; 724 struct task_cputime cputime_expires;
761 725
726 struct list_head cpu_timers[3];
727
728#endif
729
730 struct pid *leader_pid;
731
762#ifdef CONFIG_NO_HZ_FULL 732#ifdef CONFIG_NO_HZ_FULL
763 atomic_t tick_dep_mask; 733 atomic_t tick_dep_mask;
764#endif 734#endif
765 735
766 struct list_head cpu_timers[3];
767
768 struct pid *tty_old_pgrp; 736 struct pid *tty_old_pgrp;
769 737
770 /* boolean value for session group leader */ 738 /* boolean value for session group leader */
@@ -782,9 +750,9 @@ struct signal_struct {
782 * in __exit_signal, except for the group leader. 750 * in __exit_signal, except for the group leader.
783 */ 751 */
784 seqlock_t stats_lock; 752 seqlock_t stats_lock;
785 cputime_t utime, stime, cutime, cstime; 753 u64 utime, stime, cutime, cstime;
786 cputime_t gtime; 754 u64 gtime;
787 cputime_t cgtime; 755 u64 cgtime;
788 struct prev_cputime prev_cputime; 756 struct prev_cputime prev_cputime;
789 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 757 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
790 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 758 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1025,8 +993,8 @@ enum cpu_idle_type {
1025 * 993 *
1026 * The DEFINE_WAKE_Q macro declares and initializes the list head. 994 * The DEFINE_WAKE_Q macro declares and initializes the list head.
1027 * wake_up_q() does NOT reinitialize the list; it's expected to be 995 * wake_up_q() does NOT reinitialize the list; it's expected to be
1028 * called near the end of a function, where the fact that the queue is 996 * called near the end of a function. Otherwise, the list can be
1029 * not used again will be easy to see by inspection. 997 * re-initialized for later re-use by wake_q_init().
1030 * 998 *
1031 * Note that this can cause spurious wakeups. schedule() callers 999 * Note that this can cause spurious wakeups. schedule() callers
1032 * must ensure the call is done inside a loop, confirming that the 1000 * must ensure the call is done inside a loop, confirming that the
@@ -1046,6 +1014,12 @@ struct wake_q_head {
1046#define DEFINE_WAKE_Q(name) \ 1014#define DEFINE_WAKE_Q(name) \
1047 struct wake_q_head name = { WAKE_Q_TAIL, &name.first } 1015 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
1048 1016
1017static inline void wake_q_init(struct wake_q_head *head)
1018{
1019 head->first = WAKE_Q_TAIL;
1020 head->lastp = &head->first;
1021}
1022
1049extern void wake_q_add(struct wake_q_head *head, 1023extern void wake_q_add(struct wake_q_head *head,
1050 struct task_struct *task); 1024 struct task_struct *task);
1051extern void wake_up_q(struct wake_q_head *head); 1025extern void wake_up_q(struct wake_q_head *head);
@@ -1663,11 +1637,11 @@ struct task_struct {
1663 int __user *set_child_tid; /* CLONE_CHILD_SETTID */ 1637 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
1664 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ 1638 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
1665 1639
1666 cputime_t utime, stime; 1640 u64 utime, stime;
1667#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 1641#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1668 cputime_t utimescaled, stimescaled; 1642 u64 utimescaled, stimescaled;
1669#endif 1643#endif
1670 cputime_t gtime; 1644 u64 gtime;
1671 struct prev_cputime prev_cputime; 1645 struct prev_cputime prev_cputime;
1672#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1646#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1673 seqcount_t vtime_seqcount; 1647 seqcount_t vtime_seqcount;
@@ -1691,8 +1665,10 @@ struct task_struct {
1691/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1665/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1692 unsigned long min_flt, maj_flt; 1666 unsigned long min_flt, maj_flt;
1693 1667
1668#ifdef CONFIG_POSIX_TIMERS
1694 struct task_cputime cputime_expires; 1669 struct task_cputime cputime_expires;
1695 struct list_head cpu_timers[3]; 1670 struct list_head cpu_timers[3];
1671#endif
1696 1672
1697/* process credentials */ 1673/* process credentials */
1698 const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ 1674 const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
@@ -1817,7 +1793,7 @@ struct task_struct {
1817#if defined(CONFIG_TASK_XACCT) 1793#if defined(CONFIG_TASK_XACCT)
1818 u64 acct_rss_mem1; /* accumulated rss usage */ 1794 u64 acct_rss_mem1; /* accumulated rss usage */
1819 u64 acct_vm_mem1; /* accumulated virtual memory usage */ 1795 u64 acct_vm_mem1; /* accumulated virtual memory usage */
1820 cputime_t acct_timexpd; /* stime + utime since last update */ 1796 u64 acct_timexpd; /* stime + utime since last update */
1821#endif 1797#endif
1822#ifdef CONFIG_CPUSETS 1798#ifdef CONFIG_CPUSETS
1823 nodemask_t mems_allowed; /* Protected by alloc_lock */ 1799 nodemask_t mems_allowed; /* Protected by alloc_lock */
@@ -2262,17 +2238,17 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
2262 2238
2263#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2239#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2264extern void task_cputime(struct task_struct *t, 2240extern void task_cputime(struct task_struct *t,
2265 cputime_t *utime, cputime_t *stime); 2241 u64 *utime, u64 *stime);
2266extern cputime_t task_gtime(struct task_struct *t); 2242extern u64 task_gtime(struct task_struct *t);
2267#else 2243#else
2268static inline void task_cputime(struct task_struct *t, 2244static inline void task_cputime(struct task_struct *t,
2269 cputime_t *utime, cputime_t *stime) 2245 u64 *utime, u64 *stime)
2270{ 2246{
2271 *utime = t->utime; 2247 *utime = t->utime;
2272 *stime = t->stime; 2248 *stime = t->stime;
2273} 2249}
2274 2250
2275static inline cputime_t task_gtime(struct task_struct *t) 2251static inline u64 task_gtime(struct task_struct *t)
2276{ 2252{
2277 return t->gtime; 2253 return t->gtime;
2278} 2254}
@@ -2280,23 +2256,23 @@ static inline cputime_t task_gtime(struct task_struct *t)
2280 2256
2281#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 2257#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
2282static inline void task_cputime_scaled(struct task_struct *t, 2258static inline void task_cputime_scaled(struct task_struct *t,
2283 cputime_t *utimescaled, 2259 u64 *utimescaled,
2284 cputime_t *stimescaled) 2260 u64 *stimescaled)
2285{ 2261{
2286 *utimescaled = t->utimescaled; 2262 *utimescaled = t->utimescaled;
2287 *stimescaled = t->stimescaled; 2263 *stimescaled = t->stimescaled;
2288} 2264}
2289#else 2265#else
2290static inline void task_cputime_scaled(struct task_struct *t, 2266static inline void task_cputime_scaled(struct task_struct *t,
2291 cputime_t *utimescaled, 2267 u64 *utimescaled,
2292 cputime_t *stimescaled) 2268 u64 *stimescaled)
2293{ 2269{
2294 task_cputime(t, utimescaled, stimescaled); 2270 task_cputime(t, utimescaled, stimescaled);
2295} 2271}
2296#endif 2272#endif
2297 2273
2298extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2274extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
2299extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2275extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
2300 2276
2301/* 2277/*
2302 * Per process flags 2278 * Per process flags
@@ -2515,10 +2491,18 @@ extern u64 sched_clock_cpu(int cpu);
2515extern void sched_clock_init(void); 2491extern void sched_clock_init(void);
2516 2492
2517#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2493#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2494static inline void sched_clock_init_late(void)
2495{
2496}
2497
2518static inline void sched_clock_tick(void) 2498static inline void sched_clock_tick(void)
2519{ 2499{
2520} 2500}
2521 2501
2502static inline void clear_sched_clock_stable(void)
2503{
2504}
2505
2522static inline void sched_clock_idle_sleep_event(void) 2506static inline void sched_clock_idle_sleep_event(void)
2523{ 2507{
2524} 2508}
@@ -2537,6 +2521,7 @@ static inline u64 local_clock(void)
2537 return sched_clock(); 2521 return sched_clock();
2538} 2522}
2539#else 2523#else
2524extern void sched_clock_init_late(void);
2540/* 2525/*
2541 * Architectures can set this to 1 if they have specified 2526 * Architectures can set this to 1 if they have specified
2542 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, 2527 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
@@ -2544,7 +2529,6 @@ static inline u64 local_clock(void)
2544 * is reliable after all: 2529 * is reliable after all:
2545 */ 2530 */
2546extern int sched_clock_stable(void); 2531extern int sched_clock_stable(void);
2547extern void set_sched_clock_stable(void);
2548extern void clear_sched_clock_stable(void); 2532extern void clear_sched_clock_stable(void);
2549 2533
2550extern void sched_clock_tick(void); 2534extern void sched_clock_tick(void);
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 441145351301..49308e142aae 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -59,6 +59,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
59extern unsigned int sysctl_sched_autogroup_enabled; 59extern unsigned int sysctl_sched_autogroup_enabled;
60#endif 60#endif
61 61
62extern int sysctl_sched_rr_timeslice;
62extern int sched_rr_timeslice; 63extern int sched_rr_timeslice;
63 64
64extern int sched_rr_handler(struct ctl_table *table, int write, 65extern int sched_rr_handler(struct ctl_table *table, int write,
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 47dd0cebd204..59248dcc6ef3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -180,8 +180,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
180#ifdef CONFIG_DEBUG_LOCK_ALLOC 180#ifdef CONFIG_DEBUG_LOCK_ALLOC
181# define raw_spin_lock_nested(lock, subclass) \ 181# define raw_spin_lock_nested(lock, subclass) \
182 _raw_spin_lock_nested(lock, subclass) 182 _raw_spin_lock_nested(lock, subclass)
183# define raw_spin_lock_bh_nested(lock, subclass) \
184 _raw_spin_lock_bh_nested(lock, subclass)
185 183
186# define raw_spin_lock_nest_lock(lock, nest_lock) \ 184# define raw_spin_lock_nest_lock(lock, nest_lock) \
187 do { \ 185 do { \
@@ -197,7 +195,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
197# define raw_spin_lock_nested(lock, subclass) \ 195# define raw_spin_lock_nested(lock, subclass) \
198 _raw_spin_lock(((void)(subclass), (lock))) 196 _raw_spin_lock(((void)(subclass), (lock)))
199# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) 197# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
200# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
201#endif 198#endif
202 199
203#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 200#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -317,11 +314,6 @@ do { \
317 raw_spin_lock_nested(spinlock_check(lock), subclass); \ 314 raw_spin_lock_nested(spinlock_check(lock), subclass); \
318} while (0) 315} while (0)
319 316
320#define spin_lock_bh_nested(lock, subclass) \
321do { \
322 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
323} while (0)
324
325#define spin_lock_nest_lock(lock, nest_lock) \ 317#define spin_lock_nest_lock(lock, nest_lock) \
326do { \ 318do { \
327 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ 319 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 5344268e6e62..42dfab89e740 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,8 +22,6 @@ int in_lock_functions(unsigned long addr);
22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); 22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) 23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock); 24 __acquires(lock);
25void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
26 __acquires(lock);
27void __lockfunc 25void __lockfunc
28_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) 26_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
29 __acquires(lock); 27 __acquires(lock);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d3afef9d8dbe..d0d188861ad6 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -57,7 +57,6 @@
57 57
58#define _raw_spin_lock(lock) __LOCK(lock) 58#define _raw_spin_lock(lock) __LOCK(lock)
59#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) 59#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
60#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
61#define _raw_read_lock(lock) __LOCK(lock) 60#define _raw_read_lock(lock) __LOCK(lock)
62#define _raw_write_lock(lock) __LOCK(lock) 61#define _raw_write_lock(lock) __LOCK(lock)
63#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) 62#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index dc8eb63c6568..a598cf3ac70c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -33,9 +33,9 @@
33#include <linux/rcupdate.h> 33#include <linux/rcupdate.h>
34#include <linux/workqueue.h> 34#include <linux/workqueue.h>
35 35
36struct srcu_struct_array { 36struct srcu_array {
37 unsigned long c[2]; 37 unsigned long lock_count[2];
38 unsigned long seq[2]; 38 unsigned long unlock_count[2];
39}; 39};
40 40
41struct rcu_batch { 41struct rcu_batch {
@@ -46,7 +46,7 @@ struct rcu_batch {
46 46
47struct srcu_struct { 47struct srcu_struct {
48 unsigned long completed; 48 unsigned long completed;
49 struct srcu_struct_array __percpu *per_cpu_ref; 49 struct srcu_array __percpu *per_cpu_ref;
50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */ 50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */
51 bool running; 51 bool running;
52 /* callbacks just queued */ 52 /* callbacks just queued */
@@ -118,7 +118,7 @@ void process_srcu(struct work_struct *work);
118 * See include/linux/percpu-defs.h for the rules on per-CPU variables. 118 * See include/linux/percpu-defs.h for the rules on per-CPU variables.
119 */ 119 */
120#define __DEFINE_SRCU(name, is_static) \ 120#define __DEFINE_SRCU(name, is_static) \
121 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ 121 static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
122 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) 122 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
123#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) 123#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
124#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) 124#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 62a60eeacb0a..8a511c0985aa 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -198,7 +198,7 @@ static inline struct cache_head *cache_get(struct cache_head *h)
198 198
199static inline void cache_put(struct cache_head *h, struct cache_detail *cd) 199static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
200{ 200{
201 if (atomic_read(&h->ref.refcount) <= 2 && 201 if (kref_read(&h->ref) <= 2 &&
202 h->expiry_time < cd->nextcheck) 202 h->expiry_time < cd->nextcheck)
203 cd->nextcheck = h->expiry_time; 203 cd->nextcheck = h->expiry_time;
204 kref_put(&h->ref, cd->cache_put); 204 kref_put(&h->ref, cd->cache_put);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 51d601f192d4..5a209b84fd9e 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -20,11 +20,6 @@ struct timer_list {
20 unsigned long data; 20 unsigned long data;
21 u32 flags; 21 u32 flags;
22 22
23#ifdef CONFIG_TIMER_STATS
24 int start_pid;
25 void *start_site;
26 char start_comm[16];
27#endif
28#ifdef CONFIG_LOCKDEP 23#ifdef CONFIG_LOCKDEP
29 struct lockdep_map lockdep_map; 24 struct lockdep_map lockdep_map;
30#endif 25#endif
@@ -197,46 +192,6 @@ extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
197 */ 192 */
198#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) 193#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
199 194
200/*
201 * Timer-statistics info:
202 */
203#ifdef CONFIG_TIMER_STATS
204
205extern int timer_stats_active;
206
207extern void init_timer_stats(void);
208
209extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
210 void *timerf, char *comm, u32 flags);
211
212extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
213 void *addr);
214
215static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
216{
217 if (likely(!timer_stats_active))
218 return;
219 __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
220}
221
222static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
223{
224 timer->start_site = NULL;
225}
226#else
227static inline void init_timer_stats(void)
228{
229}
230
231static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
232{
233}
234
235static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
236{
237}
238#endif
239
240extern void add_timer(struct timer_list *timer); 195extern void add_timer(struct timer_list *timer);
241 196
242extern int try_to_del_timer_sync(struct timer_list *timer); 197extern int try_to_del_timer_sync(struct timer_list *timer);
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index aa9bfea8804a..0681fe25abeb 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -58,27 +58,28 @@ static inline void vtime_task_switch(struct task_struct *prev)
58 58
59extern void vtime_account_system(struct task_struct *tsk); 59extern void vtime_account_system(struct task_struct *tsk);
60extern void vtime_account_idle(struct task_struct *tsk); 60extern void vtime_account_idle(struct task_struct *tsk);
61extern void vtime_account_user(struct task_struct *tsk);
62 61
63#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ 62#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
64 63
65static inline void vtime_task_switch(struct task_struct *prev) { } 64static inline void vtime_task_switch(struct task_struct *prev) { }
66static inline void vtime_account_system(struct task_struct *tsk) { } 65static inline void vtime_account_system(struct task_struct *tsk) { }
67static inline void vtime_account_user(struct task_struct *tsk) { }
68#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ 66#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
69 67
70#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 68#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
71extern void arch_vtime_task_switch(struct task_struct *tsk); 69extern void arch_vtime_task_switch(struct task_struct *tsk);
70extern void vtime_account_user(struct task_struct *tsk);
72extern void vtime_user_enter(struct task_struct *tsk); 71extern void vtime_user_enter(struct task_struct *tsk);
73 72
74static inline void vtime_user_exit(struct task_struct *tsk) 73static inline void vtime_user_exit(struct task_struct *tsk)
75{ 74{
76 vtime_account_user(tsk); 75 vtime_account_user(tsk);
77} 76}
77
78extern void vtime_guest_enter(struct task_struct *tsk); 78extern void vtime_guest_enter(struct task_struct *tsk);
79extern void vtime_guest_exit(struct task_struct *tsk); 79extern void vtime_guest_exit(struct task_struct *tsk);
80extern void vtime_init_idle(struct task_struct *tsk, int cpu); 80extern void vtime_init_idle(struct task_struct *tsk, int cpu);
81#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ 81#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
82static inline void vtime_account_user(struct task_struct *tsk) { }
82static inline void vtime_user_enter(struct task_struct *tsk) { } 83static inline void vtime_user_enter(struct task_struct *tsk) { }
83static inline void vtime_user_exit(struct task_struct *tsk) { } 84static inline void vtime_user_exit(struct task_struct *tsk) { }
84static inline void vtime_guest_enter(struct task_struct *tsk) { } 85static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -93,9 +94,11 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk)
93 /* On hard|softirq exit we always account to hard|softirq cputime */ 94 /* On hard|softirq exit we always account to hard|softirq cputime */
94 vtime_account_system(tsk); 95 vtime_account_system(tsk);
95} 96}
97extern void vtime_flush(struct task_struct *tsk);
96#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ 98#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
97static inline void vtime_account_irq_enter(struct task_struct *tsk) { } 99static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
98static inline void vtime_account_irq_exit(struct task_struct *tsk) { } 100static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
101static inline void vtime_flush(struct task_struct *tsk) { }
99#endif 102#endif
100 103
101 104
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 7b0066814fa0..5dd9a7682227 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -51,10 +51,10 @@ struct ww_mutex {
51}; 51};
52 52
53#ifdef CONFIG_DEBUG_LOCK_ALLOC 53#ifdef CONFIG_DEBUG_LOCK_ALLOC
54# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \ 54# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
55 , .ww_class = &ww_class 55 , .ww_class = class
56#else 56#else
57# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) 57# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
58#endif 58#endif
59 59
60#define __WW_CLASS_INITIALIZER(ww_class) \ 60#define __WW_CLASS_INITIALIZER(ww_class) \
@@ -63,7 +63,7 @@ struct ww_mutex {
63 , .mutex_name = #ww_class "_mutex" } 63 , .mutex_name = #ww_class "_mutex" }
64 64
65#define __WW_MUTEX_INITIALIZER(lockname, class) \ 65#define __WW_MUTEX_INITIALIZER(lockname, class) \
66 { .base = { \__MUTEX_INITIALIZER(lockname) } \ 66 { .base = __MUTEX_INITIALIZER(lockname.base) \
67 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } 67 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
68 68
69#define DEFINE_WW_CLASS(classname) \ 69#define DEFINE_WW_CLASS(classname) \
@@ -186,11 +186,6 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
186#endif 186#endif
187} 187}
188 188
189extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
190 struct ww_acquire_ctx *ctx);
191extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
192 struct ww_acquire_ctx *ctx);
193
194/** 189/**
195 * ww_mutex_lock - acquire the w/w mutex 190 * ww_mutex_lock - acquire the w/w mutex
196 * @lock: the mutex to be acquired 191 * @lock: the mutex to be acquired
@@ -220,14 +215,7 @@ extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
220 * 215 *
221 * A mutex acquired with this function must be released with ww_mutex_unlock. 216 * A mutex acquired with this function must be released with ww_mutex_unlock.
222 */ 217 */
223static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 218extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
224{
225 if (ctx)
226 return __ww_mutex_lock(lock, ctx);
227
228 mutex_lock(&lock->base);
229 return 0;
230}
231 219
232/** 220/**
233 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible 221 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -259,14 +247,8 @@ static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ct
259 * 247 *
260 * A mutex acquired with this function must be released with ww_mutex_unlock. 248 * A mutex acquired with this function must be released with ww_mutex_unlock.
261 */ 249 */
262static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, 250extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
263 struct ww_acquire_ctx *ctx) 251 struct ww_acquire_ctx *ctx);
264{
265 if (ctx)
266 return __ww_mutex_lock_interruptible(lock, ctx);
267 else
268 return mutex_lock_interruptible(&lock->base);
269}
270 252
271/** 253/**
272 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex 254 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex