aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpi_drivers.h1
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/video.h6
-rw-r--r--include/asm-generic/atomic-long.h58
-rw-r--r--include/asm-generic/atomic.h47
-rw-r--r--include/asm-generic/atomic64.h15
-rw-r--r--include/asm-generic/barrier.h41
-rw-r--r--include/asm-generic/cputime_nsecs.h2
-rw-r--r--include/asm-generic/mutex-dec.h2
-rw-r--r--include/asm-generic/mutex-xchg.h6
-rw-r--r--include/asm-generic/qspinlock.h58
-rw-r--r--include/asm-generic/rwsem.h22
-rw-r--r--include/asm-generic/vmlinux.lds.h14
-rw-r--r--include/clocksource/timer-sp804.h8
-rw-r--r--include/drm/i915_pciids.h10
-rw-r--r--include/drm/ttm/ttm_bo_api.h14
-rw-r--r--include/kvm/arm_pmu.h4
-rw-r--r--include/linux/alarmtimer.h6
-rw-r--r--include/linux/atomic.h747
-rw-r--r--include/linux/audit.h24
-rw-r--r--include/linux/bcma/bcma.h1
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/bitmap.h4
-rw-r--r--include/linux/bpf.h32
-rw-r--r--include/linux/buffer_head.h3
-rw-r--r--include/linux/ceph/osd_client.h5
-rw-r--r--include/linux/ceph/osdmap.h5
-rw-r--r--include/linux/clk-provider.h2
-rw-r--r--include/linux/clk.h4
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/compiler.h21
-rw-r--r--include/linux/console.h13
-rw-r--r--include/linux/console_struct.h38
-rw-r--r--include/linux/context_tracking.h15
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--include/linux/dcache.h13
-rw-r--r--include/linux/devpts_fs.h9
-rw-r--r--include/linux/dma-buf.h13
-rw-r--r--include/linux/dma/hsu.h14
-rw-r--r--include/linux/efi.h202
-rw-r--r--include/linux/extcon.h115
-rw-r--r--include/linux/extcon/extcon-adc-jack.h2
-rw-r--r--include/linux/fence.h4
-rw-r--r--include/linux/filter.h6
-rw-r--r--include/linux/fscache-cache.h2
-rw-r--r--include/linux/huge_mm.h4
-rw-r--r--include/linux/iio/common/st_sensors.h15
-rw-r--r--include/linux/iio/iio.h22
-rw-r--r--include/linux/iio/sw_device.h70
-rw-r--r--include/linux/inet_diag.h6
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/interrupt.h8
-rw-r--r--include/linux/irq.h16
-rw-r--r--include/linux/irqchip/arm-gic-v3.h10
-rw-r--r--include/linux/irqchip/arm-gic.h11
-rw-r--r--include/linux/irqdomain.h12
-rw-r--r--include/linux/isa.h5
-rw-r--r--include/linux/jump_label.h16
-rw-r--r--include/linux/kasan.h11
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/leds.h23
-rw-r--r--include/linux/list.h10
-rw-r--r--include/linux/memcontrol.h25
-rw-r--r--include/linux/mfd/da9052/da9052.h2
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mlx5/device.h8
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h12
-rw-r--r--include/linux/mlx5/qp.h7
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/mod_devicetable.h16
-rw-r--r--include/linux/msi.h8
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/nvmem-consumer.h2
-rw-r--r--include/linux/of.h5
-rw-r--r--include/linux/of_pci.h2
-rw-r--r--include/linux/of_reserved_mem.h7
-rw-r--r--include/linux/page_idle.h43
-rw-r--r--include/linux/percpu-refcount.h12
-rw-r--r--include/linux/perf_event.h15
-rw-r--r--include/linux/phy/phy.h17
-rw-r--r--include/linux/platform_data/sht3x.h25
-rw-r--r--include/linux/posix_acl.h6
-rw-r--r--include/linux/printk.h17
-rw-r--r--include/linux/pwm.h19
-rw-r--r--include/linux/qed/qed_eth_if.h1
-rw-r--r--include/linux/radix-tree.h1
-rw-r--r--include/linux/random.h12
-rw-r--r--include/linux/rcupdate.h23
-rw-r--r--include/linux/reservation.h53
-rw-r--r--include/linux/reset.h211
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rwsem.h8
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sctp.h2
-rw-r--r--include/linux/seqlock.h7
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/serial_core.h8
-rw-r--r--include/linux/sfi.h1
-rw-r--r--include/linux/skbuff.h20
-rw-r--r--include/linux/sock_diag.h6
-rw-r--r--include/linux/spinlock_up.h10
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/time.h15
-rw-r--r--include/linux/timekeeping.h3
-rw-r--r--include/linux/timer.h34
-rw-r--r--include/linux/torture.h4
-rw-r--r--include/linux/usb/ehci_def.h4
-rw-r--r--include/linux/usb/gadget.h592
-rw-r--r--include/linux/usb/msm_hsusb.h200
-rw-r--r--include/linux/usb/musb.h5
-rw-r--r--include/linux/usb/of.h4
-rw-r--r--include/linux/usb/xhci_pdriver.h27
-rw-r--r--include/linux/vt_kern.h7
-rw-r--r--include/linux/vtime.h50
-rw-r--r--include/media/v4l2-mc.h2
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/compat.h1
-rw-r--r--include/net/gre.h2
-rw-r--r--include/net/ip.h5
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_vs.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h8
-rw-r--r--include/net/netfilter/nf_queue.h4
-rw-r--r--include/net/netfilter/nf_tables.h1
-rw-r--r--include/net/netns/netfilter.h2
-rw-r--r--include/net/pkt_cls.h10
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/sch_generic.h6
-rw-r--r--include/net/sock.h8
-rw-r--r--include/net/switchdev.h2
-rw-r--r--include/net/tc_act/tc_ife.h6
-rw-r--r--include/rdma/ib_verbs.h6
-rw-r--r--include/rdma/rdma_vt.h4
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/btrfs.h2
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/fuse.h7
-rw-r--r--include/uapi/linux/gtp.h2
-rw-r--r--include/uapi/linux/iio/types.h1
-rw-r--r--include/uapi/linux/input-event-codes.h32
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/netfilter/Kbuild1
-rw-r--r--include/uapi/linux/netfilter/xt_SYNPROXY.h2
-rw-r--r--include/uapi/linux/perf_event.h6
-rw-r--r--include/uapi/linux/pkt_cls.h4
-rw-r--r--include/uapi/sound/Kbuild3
154 files changed, 2103 insertions, 1504 deletions
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 797ae2ec8eee..29c691265b49 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -78,6 +78,7 @@
78 78
79/* ACPI PCI Interrupt Link (pci_link.c) */ 79/* ACPI PCI Interrupt Link (pci_link.c) */
80 80
81int acpi_irq_penalty_init(void);
81int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, 82int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
82 int *polarity, char **name); 83 int *polarity, char **name);
83int acpi_pci_link_free_irq(acpi_handle handle); 84int acpi_pci_link_free_irq(acpi_handle handle);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 4e4c21491c41..1ff3a76c265d 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -192,7 +192,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
192/* 192/*
193 * Optionally support group module level code. 193 * Optionally support group module level code.
194 */ 194 */
195ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE); 195ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
196 196
197/* 197/*
198 * Optionally use 32-bit FADT addresses if and when there is a conflict 198 * Optionally use 32-bit FADT addresses if and when there is a conflict
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 70a41f742037..5731ccb42585 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
51 */ 51 */
52extern bool acpi_video_handles_brightness_key_presses(void); 52extern bool acpi_video_handles_brightness_key_presses(void);
53extern int acpi_video_get_levels(struct acpi_device *device, 53extern int acpi_video_get_levels(struct acpi_device *device,
54 struct acpi_video_device_brightness **dev_br); 54 struct acpi_video_device_brightness **dev_br,
55 int *pmax_level);
55#else 56#else
56static inline int acpi_video_register(void) { return 0; } 57static inline int acpi_video_register(void) { return 0; }
57static inline void acpi_video_unregister(void) { return; } 58static inline void acpi_video_unregister(void) { return; }
@@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
72 return false; 73 return false;
73} 74}
74static inline int acpi_video_get_levels(struct acpi_device *device, 75static inline int acpi_video_get_levels(struct acpi_device *device,
75 struct acpi_video_device_brightness **dev_br) 76 struct acpi_video_device_brightness **dev_br,
77 int *pmax_level)
76{ 78{
77 return -ENODEV; 79 return -ENODEV;
78} 80}
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 5e1f345b58dd..288cc9e96395 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -112,6 +112,62 @@ static __always_inline void atomic_long_dec(atomic_long_t *l)
112 ATOMIC_LONG_PFX(_dec)(v); 112 ATOMIC_LONG_PFX(_dec)(v);
113} 113}
114 114
115#define ATOMIC_LONG_FETCH_OP(op, mo) \
116static inline long \
117atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \
118{ \
119 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
120 \
121 return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \
122}
123
124ATOMIC_LONG_FETCH_OP(add, )
125ATOMIC_LONG_FETCH_OP(add, _relaxed)
126ATOMIC_LONG_FETCH_OP(add, _acquire)
127ATOMIC_LONG_FETCH_OP(add, _release)
128ATOMIC_LONG_FETCH_OP(sub, )
129ATOMIC_LONG_FETCH_OP(sub, _relaxed)
130ATOMIC_LONG_FETCH_OP(sub, _acquire)
131ATOMIC_LONG_FETCH_OP(sub, _release)
132ATOMIC_LONG_FETCH_OP(and, )
133ATOMIC_LONG_FETCH_OP(and, _relaxed)
134ATOMIC_LONG_FETCH_OP(and, _acquire)
135ATOMIC_LONG_FETCH_OP(and, _release)
136ATOMIC_LONG_FETCH_OP(andnot, )
137ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
138ATOMIC_LONG_FETCH_OP(andnot, _acquire)
139ATOMIC_LONG_FETCH_OP(andnot, _release)
140ATOMIC_LONG_FETCH_OP(or, )
141ATOMIC_LONG_FETCH_OP(or, _relaxed)
142ATOMIC_LONG_FETCH_OP(or, _acquire)
143ATOMIC_LONG_FETCH_OP(or, _release)
144ATOMIC_LONG_FETCH_OP(xor, )
145ATOMIC_LONG_FETCH_OP(xor, _relaxed)
146ATOMIC_LONG_FETCH_OP(xor, _acquire)
147ATOMIC_LONG_FETCH_OP(xor, _release)
148
149#undef ATOMIC_LONG_FETCH_OP
150
151#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \
152static inline long \
153atomic_long_fetch_##op##mo(atomic_long_t *l) \
154{ \
155 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
156 \
157 return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \
158}
159
160ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
161ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
162ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
163ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
164ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
165ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
166ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
167ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
168
169#undef ATOMIC_LONG_FETCH_INC_DEC_OP
170
115#define ATOMIC_LONG_OP(op) \ 171#define ATOMIC_LONG_OP(op) \
116static __always_inline void \ 172static __always_inline void \
117atomic_long_##op(long i, atomic_long_t *l) \ 173atomic_long_##op(long i, atomic_long_t *l) \
@@ -124,9 +180,9 @@ atomic_long_##op(long i, atomic_long_t *l) \
124ATOMIC_LONG_OP(add) 180ATOMIC_LONG_OP(add)
125ATOMIC_LONG_OP(sub) 181ATOMIC_LONG_OP(sub)
126ATOMIC_LONG_OP(and) 182ATOMIC_LONG_OP(and)
183ATOMIC_LONG_OP(andnot)
127ATOMIC_LONG_OP(or) 184ATOMIC_LONG_OP(or)
128ATOMIC_LONG_OP(xor) 185ATOMIC_LONG_OP(xor)
129ATOMIC_LONG_OP(andnot)
130 186
131#undef ATOMIC_LONG_OP 187#undef ATOMIC_LONG_OP
132 188
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 74f1a3704d7a..9ed8b987185b 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
61 return c c_op i; \ 61 return c c_op i; \
62} 62}
63 63
64#define ATOMIC_FETCH_OP(op, c_op) \
65static inline int atomic_fetch_##op(int i, atomic_t *v) \
66{ \
67 int c, old; \
68 \
69 c = v->counter; \
70 while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
71 c = old; \
72 \
73 return c; \
74}
75
64#else 76#else
65 77
66#include <linux/irqflags.h> 78#include <linux/irqflags.h>
@@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
88 return ret; \ 100 return ret; \
89} 101}
90 102
103#define ATOMIC_FETCH_OP(op, c_op) \
104static inline int atomic_fetch_##op(int i, atomic_t *v) \
105{ \
106 unsigned long flags; \
107 int ret; \
108 \
109 raw_local_irq_save(flags); \
110 ret = v->counter; \
111 v->counter = v->counter c_op i; \
112 raw_local_irq_restore(flags); \
113 \
114 return ret; \
115}
116
91#endif /* CONFIG_SMP */ 117#endif /* CONFIG_SMP */
92 118
93#ifndef atomic_add_return 119#ifndef atomic_add_return
@@ -98,6 +124,26 @@ ATOMIC_OP_RETURN(add, +)
98ATOMIC_OP_RETURN(sub, -) 124ATOMIC_OP_RETURN(sub, -)
99#endif 125#endif
100 126
127#ifndef atomic_fetch_add
128ATOMIC_FETCH_OP(add, +)
129#endif
130
131#ifndef atomic_fetch_sub
132ATOMIC_FETCH_OP(sub, -)
133#endif
134
135#ifndef atomic_fetch_and
136ATOMIC_FETCH_OP(and, &)
137#endif
138
139#ifndef atomic_fetch_or
140ATOMIC_FETCH_OP(or, |)
141#endif
142
143#ifndef atomic_fetch_xor
144ATOMIC_FETCH_OP(xor, ^)
145#endif
146
101#ifndef atomic_and 147#ifndef atomic_and
102ATOMIC_OP(and, &) 148ATOMIC_OP(and, &)
103#endif 149#endif
@@ -110,6 +156,7 @@ ATOMIC_OP(or, |)
110ATOMIC_OP(xor, ^) 156ATOMIC_OP(xor, ^)
111#endif 157#endif
112 158
159#undef ATOMIC_FETCH_OP
113#undef ATOMIC_OP_RETURN 160#undef ATOMIC_OP_RETURN
114#undef ATOMIC_OP 161#undef ATOMIC_OP
115 162
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index d48e78ccad3d..dad68bf46c77 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -27,16 +27,23 @@ extern void atomic64_##op(long long a, atomic64_t *v);
27#define ATOMIC64_OP_RETURN(op) \ 27#define ATOMIC64_OP_RETURN(op) \
28extern long long atomic64_##op##_return(long long a, atomic64_t *v); 28extern long long atomic64_##op##_return(long long a, atomic64_t *v);
29 29
30#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) 30#define ATOMIC64_FETCH_OP(op) \
31extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
32
33#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
31 34
32ATOMIC64_OPS(add) 35ATOMIC64_OPS(add)
33ATOMIC64_OPS(sub) 36ATOMIC64_OPS(sub)
34 37
35ATOMIC64_OP(and) 38#undef ATOMIC64_OPS
36ATOMIC64_OP(or) 39#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
37ATOMIC64_OP(xor) 40
41ATOMIC64_OPS(and)
42ATOMIC64_OPS(or)
43ATOMIC64_OPS(xor)
38 44
39#undef ATOMIC64_OPS 45#undef ATOMIC64_OPS
46#undef ATOMIC64_FETCH_OP
40#undef ATOMIC64_OP_RETURN 47#undef ATOMIC64_OP_RETURN
41#undef ATOMIC64_OP 48#undef ATOMIC64_OP
42 49
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 1cceca146905..fe297b599b0a 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -194,7 +194,7 @@ do { \
194}) 194})
195#endif 195#endif
196 196
197#endif 197#endif /* CONFIG_SMP */
198 198
199/* Barriers for virtual machine guests when talking to an SMP host */ 199/* Barriers for virtual machine guests when talking to an SMP host */
200#define virt_mb() __smp_mb() 200#define virt_mb() __smp_mb()
@@ -207,5 +207,44 @@ do { \
207#define virt_store_release(p, v) __smp_store_release(p, v) 207#define virt_store_release(p, v) __smp_store_release(p, v)
208#define virt_load_acquire(p) __smp_load_acquire(p) 208#define virt_load_acquire(p) __smp_load_acquire(p)
209 209
210/**
211 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
212 *
213 * A control dependency provides a LOAD->STORE order, the additional RMB
214 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
215 * aka. (load)-ACQUIRE.
216 *
217 * Architectures that do not do load speculation can have this be barrier().
218 */
219#ifndef smp_acquire__after_ctrl_dep
220#define smp_acquire__after_ctrl_dep() smp_rmb()
221#endif
222
223/**
224 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
225 * @ptr: pointer to the variable to wait on
226 * @cond: boolean expression to wait for
227 *
228 * Equivalent to using smp_load_acquire() on the condition variable but employs
229 * the control dependency of the wait to reduce the barrier on many platforms.
230 *
231 * Due to C lacking lambda expressions we load the value of *ptr into a
232 * pre-named variable @VAL to be used in @cond.
233 */
234#ifndef smp_cond_load_acquire
235#define smp_cond_load_acquire(ptr, cond_expr) ({ \
236 typeof(ptr) __PTR = (ptr); \
237 typeof(*ptr) VAL; \
238 for (;;) { \
239 VAL = READ_ONCE(*__PTR); \
240 if (cond_expr) \
241 break; \
242 cpu_relax(); \
243 } \
244 smp_acquire__after_ctrl_dep(); \
245 VAL; \
246})
247#endif
248
210#endif /* !__ASSEMBLY__ */ 249#endif /* !__ASSEMBLY__ */
211#endif /* __ASM_GENERIC_BARRIER_H */ 250#endif /* __ASM_GENERIC_BARRIER_H */
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0f1c6f315cdc..a84e28e0c634 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -50,6 +50,8 @@ typedef u64 __nocast cputime64_t;
50 (__force u64)(__ct) 50 (__force u64)(__ct)
51#define nsecs_to_cputime(__nsecs) \ 51#define nsecs_to_cputime(__nsecs) \
52 (__force cputime_t)(__nsecs) 52 (__force cputime_t)(__nsecs)
53#define nsecs_to_cputime64(__nsecs) \
54 (__force cputime64_t)(__nsecs)
53 55
54 56
55/* 57/*
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index fd694cfd678a..c54829d3de37 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
80static inline int 80static inline int
81__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) 81__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
82{ 82{
83 if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1)) 83 if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
84 return 1; 84 return 1;
85 return 0; 85 return 0;
86} 86}
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index a6b4a7bd6ac9..3269ec4e195f 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -91,8 +91,12 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
91static inline int 91static inline int
92__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) 92__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
93{ 93{
94 int prev = atomic_xchg_acquire(count, 0); 94 int prev;
95 95
96 if (atomic_read(count) != 1)
97 return 0;
98
99 prev = atomic_xchg_acquire(count, 0);
96 if (unlikely(prev < 0)) { 100 if (unlikely(prev < 0)) {
97 /* 101 /*
98 * The lock was marked contended so we must restore that 102 * The lock was marked contended so we must restore that
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 6bd05700d8c9..9f0681bf1e87 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,37 +22,33 @@
22#include <asm-generic/qspinlock_types.h> 22#include <asm-generic/qspinlock_types.h>
23 23
24/** 24/**
25 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
26 * @lock : Pointer to queued spinlock structure
27 *
28 * There is a very slight possibility of live-lock if the lockers keep coming
29 * and the waiter is just unfortunate enough to not see any unlock state.
30 */
31#ifndef queued_spin_unlock_wait
32extern void queued_spin_unlock_wait(struct qspinlock *lock);
33#endif
34
35/**
25 * queued_spin_is_locked - is the spinlock locked? 36 * queued_spin_is_locked - is the spinlock locked?
26 * @lock: Pointer to queued spinlock structure 37 * @lock: Pointer to queued spinlock structure
27 * Return: 1 if it is locked, 0 otherwise 38 * Return: 1 if it is locked, 0 otherwise
28 */ 39 */
40#ifndef queued_spin_is_locked
29static __always_inline int queued_spin_is_locked(struct qspinlock *lock) 41static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
30{ 42{
31 /* 43 /*
32 * queued_spin_lock_slowpath() can ACQUIRE the lock before 44 * See queued_spin_unlock_wait().
33 * issuing the unordered store that sets _Q_LOCKED_VAL.
34 *
35 * See both smp_cond_acquire() sites for more detail.
36 *
37 * This however means that in code like:
38 *
39 * spin_lock(A) spin_lock(B)
40 * spin_unlock_wait(B) spin_is_locked(A)
41 * do_something() do_something()
42 *
43 * Both CPUs can end up running do_something() because the store
44 * setting _Q_LOCKED_VAL will pass through the loads in
45 * spin_unlock_wait() and/or spin_is_locked().
46 * 45 *
47 * Avoid this by issuing a full memory barrier between the spin_lock() 46 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
48 * and the loads in spin_unlock_wait() and spin_is_locked(). 47 * isn't immediately observable.
49 *
50 * Note that regular mutual exclusion doesn't care about this
51 * delayed store.
52 */ 48 */
53 smp_mb(); 49 return atomic_read(&lock->val);
54 return atomic_read(&lock->val) & _Q_LOCKED_MASK;
55} 50}
51#endif
56 52
57/** 53/**
58 * queued_spin_value_unlocked - is the spinlock structure unlocked? 54 * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -115,28 +111,12 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
115static __always_inline void queued_spin_unlock(struct qspinlock *lock) 111static __always_inline void queued_spin_unlock(struct qspinlock *lock)
116{ 112{
117 /* 113 /*
118 * smp_mb__before_atomic() in order to guarantee release semantics 114 * unlock() needs release semantics:
119 */ 115 */
120 smp_mb__before_atomic(); 116 (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
121 atomic_sub(_Q_LOCKED_VAL, &lock->val);
122} 117}
123#endif 118#endif
124 119
125/**
126 * queued_spin_unlock_wait - wait until current lock holder releases the lock
127 * @lock : Pointer to queued spinlock structure
128 *
129 * There is a very slight possibility of live-lock if the lockers keep coming
130 * and the waiter is just unfortunate enough to not see any unlock state.
131 */
132static inline void queued_spin_unlock_wait(struct qspinlock *lock)
133{
134 /* See queued_spin_is_locked() */
135 smp_mb();
136 while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
137 cpu_relax();
138}
139
140#ifndef virt_spin_lock 120#ifndef virt_spin_lock
141static __always_inline bool virt_spin_lock(struct qspinlock *lock) 121static __always_inline bool virt_spin_lock(struct qspinlock *lock)
142{ 122{
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index 3fc94a046bf5..5be122e3d326 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -41,8 +41,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
41{ 41{
42 long tmp; 42 long tmp;
43 43
44 while ((tmp = sem->count) >= 0) { 44 while ((tmp = atomic_long_read(&sem->count)) >= 0) {
45 if (tmp == cmpxchg_acquire(&sem->count, tmp, 45 if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
46 tmp + RWSEM_ACTIVE_READ_BIAS)) { 46 tmp + RWSEM_ACTIVE_READ_BIAS)) {
47 return 1; 47 return 1;
48 } 48 }
@@ -79,7 +79,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
79{ 79{
80 long tmp; 80 long tmp;
81 81
82 tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, 82 tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
83 RWSEM_ACTIVE_WRITE_BIAS); 83 RWSEM_ACTIVE_WRITE_BIAS);
84 return tmp == RWSEM_UNLOCKED_VALUE; 84 return tmp == RWSEM_UNLOCKED_VALUE;
85} 85}
@@ -107,14 +107,6 @@ static inline void __up_write(struct rw_semaphore *sem)
107} 107}
108 108
109/* 109/*
110 * implement atomic add functionality
111 */
112static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
113{
114 atomic_long_add(delta, (atomic_long_t *)&sem->count);
115}
116
117/*
118 * downgrade write lock to read lock 110 * downgrade write lock to read lock
119 */ 111 */
120static inline void __downgrade_write(struct rw_semaphore *sem) 112static inline void __downgrade_write(struct rw_semaphore *sem)
@@ -134,13 +126,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
134 rwsem_downgrade_wake(sem); 126 rwsem_downgrade_wake(sem);
135} 127}
136 128
137/*
138 * implement exchange and add functionality
139 */
140static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
141{
142 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
143}
144
145#endif /* __KERNEL__ */ 129#endif /* __KERNEL__ */
146#endif /* _ASM_GENERIC_RWSEM_H */ 130#endif /* _ASM_GENERIC_RWSEM_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6a67ab94b553..54643d1f5af4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -250,6 +250,14 @@
250 VMLINUX_SYMBOL(__end_init_task) = .; 250 VMLINUX_SYMBOL(__end_init_task) = .;
251 251
252/* 252/*
253 * Allow architectures to handle ro_after_init data on their
254 * own by defining an empty RO_AFTER_INIT_DATA.
255 */
256#ifndef RO_AFTER_INIT_DATA
257#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
258#endif
259
260/*
253 * Read only Data 261 * Read only Data
254 */ 262 */
255#define RO_DATA_SECTION(align) \ 263#define RO_DATA_SECTION(align) \
@@ -257,7 +265,7 @@
257 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ 265 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
258 VMLINUX_SYMBOL(__start_rodata) = .; \ 266 VMLINUX_SYMBOL(__start_rodata) = .; \
259 *(.rodata) *(.rodata.*) \ 267 *(.rodata) *(.rodata.*) \
260 *(.data..ro_after_init) /* Read only after init */ \ 268 RO_AFTER_INIT_DATA /* Read only after init */ \
261 *(__vermagic) /* Kernel version magic */ \ 269 *(__vermagic) /* Kernel version magic */ \
262 . = ALIGN(8); \ 270 . = ALIGN(8); \
263 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ 271 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
@@ -542,15 +550,19 @@
542 550
543#define INIT_TEXT \ 551#define INIT_TEXT \
544 *(.init.text) \ 552 *(.init.text) \
553 *(.text.startup) \
545 MEM_DISCARD(init.text) 554 MEM_DISCARD(init.text)
546 555
547#define EXIT_DATA \ 556#define EXIT_DATA \
548 *(.exit.data) \ 557 *(.exit.data) \
558 *(.fini_array) \
559 *(.dtors) \
549 MEM_DISCARD(exit.data) \ 560 MEM_DISCARD(exit.data) \
550 MEM_DISCARD(exit.rodata) 561 MEM_DISCARD(exit.rodata)
551 562
552#define EXIT_TEXT \ 563#define EXIT_TEXT \
553 *(.exit.text) \ 564 *(.exit.text) \
565 *(.text.exit) \
554 MEM_DISCARD(exit.text) 566 MEM_DISCARD(exit.text)
555 567
556#define EXIT_CALL \ 568#define EXIT_CALL \
diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h
index 1f8a1caa7cb4..7654d71243dd 100644
--- a/include/clocksource/timer-sp804.h
+++ b/include/clocksource/timer-sp804.h
@@ -3,10 +3,10 @@
3 3
4struct clk; 4struct clk;
5 5
6void __sp804_clocksource_and_sched_clock_init(void __iomem *, 6int __sp804_clocksource_and_sched_clock_init(void __iomem *,
7 const char *, struct clk *, int); 7 const char *, struct clk *, int);
8void __sp804_clockevents_init(void __iomem *, unsigned int, 8int __sp804_clockevents_init(void __iomem *, unsigned int,
9 struct clk *, const char *); 9 struct clk *, const char *);
10void sp804_timer_disable(void __iomem *); 10void sp804_timer_disable(void __iomem *);
11 11
12static inline void sp804_clocksource_init(void __iomem *base, const char *name) 12static inline void sp804_clocksource_init(void __iomem *base, const char *name)
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 9094599a1150..33466bfc6440 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -309,6 +309,7 @@
309 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ 309 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
310 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ 310 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
311 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ 311 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
312 INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
312 INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ 313 INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
313 INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ 314 INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
314 315
@@ -322,15 +323,12 @@
322 INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ 323 INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
323 324
324#define INTEL_KBL_GT3_IDS(info) \ 325#define INTEL_KBL_GT3_IDS(info) \
326 INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
325 INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ 327 INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
326 INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ 328 INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
327 INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
328 329
329#define INTEL_KBL_GT4_IDS(info) \ 330#define INTEL_KBL_GT4_IDS(info) \
330 INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ 331 INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
331 INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
332 INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
333 INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
334 332
335#define INTEL_KBL_IDS(info) \ 333#define INTEL_KBL_IDS(info) \
336 INTEL_KBL_GT1_IDS(info), \ 334 INTEL_KBL_GT1_IDS(info), \
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c801d9028e37..4cecb0b75b9c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
316 */ 316 */
317extern int ttm_bo_wait(struct ttm_buffer_object *bo, 317extern int ttm_bo_wait(struct ttm_buffer_object *bo,
318 bool interruptible, bool no_wait); 318 bool interruptible, bool no_wait);
319
320/**
321 * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
322 *
323 * @placement: Return immediately if buffer is busy.
324 * @mem: The struct ttm_mem_reg indicating the region where the bo resides
325 * @new_flags: Describes compatible placement found
326 *
327 * Returns true if the placement is compatible
328 */
329extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
330 struct ttm_mem_reg *mem,
331 uint32_t *new_flags);
332
319/** 333/**
320 * ttm_bo_validate 334 * ttm_bo_validate
321 * 335 *
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index fe389ac31489..92e7e97ca8ff 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -18,13 +18,13 @@
18#ifndef __ASM_ARM_KVM_PMU_H 18#ifndef __ASM_ARM_KVM_PMU_H
19#define __ASM_ARM_KVM_PMU_H 19#define __ASM_ARM_KVM_PMU_H
20 20
21#ifdef CONFIG_KVM_ARM_PMU
22
23#include <linux/perf_event.h> 21#include <linux/perf_event.h>
24#include <asm/perf_event.h> 22#include <asm/perf_event.h>
25 23
26#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) 24#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
27 25
26#ifdef CONFIG_KVM_ARM_PMU
27
28struct kvm_pmc { 28struct kvm_pmc {
29 u8 idx; /* index into the pmu->pmc array */ 29 u8 idx; /* index into the pmu->pmc array */
30 struct perf_event *perf_event; 30 struct perf_event *perf_event;
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 52f3b7da4f2d..9d8031257a90 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -26,10 +26,10 @@ enum alarmtimer_restart {
26 * struct alarm - Alarm timer structure 26 * struct alarm - Alarm timer structure
27 * @node: timerqueue node for adding to the event list this value 27 * @node: timerqueue node for adding to the event list this value
28 * also includes the expiration time. 28 * also includes the expiration time.
29 * @period: Period for recuring alarms 29 * @timer: hrtimer used to schedule events while running
30 * @function: Function pointer to be executed when the timer fires. 30 * @function: Function pointer to be executed when the timer fires.
31 * @type: Alarm type (BOOTTIME/REALTIME) 31 * @type: Alarm type (BOOTTIME/REALTIME).
32 * @enabled: Flag that represents if the alarm is set to fire or not 32 * @state: Flag that represents if the alarm is set to fire or not.
33 * @data: Internal data value. 33 * @data: Internal data value.
34 */ 34 */
35struct alarm { 35struct alarm {
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e451534fe54d..e71835bf60a9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -163,206 +163,265 @@
163#endif 163#endif
164#endif /* atomic_dec_return_relaxed */ 164#endif /* atomic_dec_return_relaxed */
165 165
166/* atomic_xchg_relaxed */
167#ifndef atomic_xchg_relaxed
168#define atomic_xchg_relaxed atomic_xchg
169#define atomic_xchg_acquire atomic_xchg
170#define atomic_xchg_release atomic_xchg
171 166
172#else /* atomic_xchg_relaxed */ 167/* atomic_fetch_add_relaxed */
168#ifndef atomic_fetch_add_relaxed
169#define atomic_fetch_add_relaxed atomic_fetch_add
170#define atomic_fetch_add_acquire atomic_fetch_add
171#define atomic_fetch_add_release atomic_fetch_add
173 172
174#ifndef atomic_xchg_acquire 173#else /* atomic_fetch_add_relaxed */
175#define atomic_xchg_acquire(...) \ 174
176 __atomic_op_acquire(atomic_xchg, __VA_ARGS__) 175#ifndef atomic_fetch_add_acquire
176#define atomic_fetch_add_acquire(...) \
177 __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
177#endif 178#endif
178 179
179#ifndef atomic_xchg_release 180#ifndef atomic_fetch_add_release
180#define atomic_xchg_release(...) \ 181#define atomic_fetch_add_release(...) \
181 __atomic_op_release(atomic_xchg, __VA_ARGS__) 182 __atomic_op_release(atomic_fetch_add, __VA_ARGS__)
182#endif 183#endif
183 184
184#ifndef atomic_xchg 185#ifndef atomic_fetch_add
185#define atomic_xchg(...) \ 186#define atomic_fetch_add(...) \
186 __atomic_op_fence(atomic_xchg, __VA_ARGS__) 187 __atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
188#endif
189#endif /* atomic_fetch_add_relaxed */
190
191/* atomic_fetch_inc_relaxed */
192#ifndef atomic_fetch_inc_relaxed
193
194#ifndef atomic_fetch_inc
195#define atomic_fetch_inc(v) atomic_fetch_add(1, (v))
196#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v))
197#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v))
198#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v))
199#else /* atomic_fetch_inc */
200#define atomic_fetch_inc_relaxed atomic_fetch_inc
201#define atomic_fetch_inc_acquire atomic_fetch_inc
202#define atomic_fetch_inc_release atomic_fetch_inc
203#endif /* atomic_fetch_inc */
204
205#else /* atomic_fetch_inc_relaxed */
206
207#ifndef atomic_fetch_inc_acquire
208#define atomic_fetch_inc_acquire(...) \
209 __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
187#endif 210#endif
188#endif /* atomic_xchg_relaxed */
189 211
190/* atomic_cmpxchg_relaxed */ 212#ifndef atomic_fetch_inc_release
191#ifndef atomic_cmpxchg_relaxed 213#define atomic_fetch_inc_release(...) \
192#define atomic_cmpxchg_relaxed atomic_cmpxchg 214 __atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
193#define atomic_cmpxchg_acquire atomic_cmpxchg 215#endif
194#define atomic_cmpxchg_release atomic_cmpxchg
195 216
196#else /* atomic_cmpxchg_relaxed */ 217#ifndef atomic_fetch_inc
218#define atomic_fetch_inc(...) \
219 __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
220#endif
221#endif /* atomic_fetch_inc_relaxed */
197 222
198#ifndef atomic_cmpxchg_acquire 223/* atomic_fetch_sub_relaxed */
199#define atomic_cmpxchg_acquire(...) \ 224#ifndef atomic_fetch_sub_relaxed
200 __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) 225#define atomic_fetch_sub_relaxed atomic_fetch_sub
226#define atomic_fetch_sub_acquire atomic_fetch_sub
227#define atomic_fetch_sub_release atomic_fetch_sub
228
229#else /* atomic_fetch_sub_relaxed */
230
231#ifndef atomic_fetch_sub_acquire
232#define atomic_fetch_sub_acquire(...) \
233 __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
201#endif 234#endif
202 235
203#ifndef atomic_cmpxchg_release 236#ifndef atomic_fetch_sub_release
204#define atomic_cmpxchg_release(...) \ 237#define atomic_fetch_sub_release(...) \
205 __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) 238 __atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
206#endif 239#endif
207 240
208#ifndef atomic_cmpxchg 241#ifndef atomic_fetch_sub
209#define atomic_cmpxchg(...) \ 242#define atomic_fetch_sub(...) \
210 __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) 243 __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
244#endif
245#endif /* atomic_fetch_sub_relaxed */
246
247/* atomic_fetch_dec_relaxed */
248#ifndef atomic_fetch_dec_relaxed
249
250#ifndef atomic_fetch_dec
251#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v))
252#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v))
253#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v))
254#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v))
255#else /* atomic_fetch_dec */
256#define atomic_fetch_dec_relaxed atomic_fetch_dec
257#define atomic_fetch_dec_acquire atomic_fetch_dec
258#define atomic_fetch_dec_release atomic_fetch_dec
259#endif /* atomic_fetch_dec */
260
261#else /* atomic_fetch_dec_relaxed */
262
263#ifndef atomic_fetch_dec_acquire
264#define atomic_fetch_dec_acquire(...) \
265 __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
211#endif 266#endif
212#endif /* atomic_cmpxchg_relaxed */
213 267
214#ifndef atomic64_read_acquire 268#ifndef atomic_fetch_dec_release
215#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) 269#define atomic_fetch_dec_release(...) \
270 __atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
216#endif 271#endif
217 272
218#ifndef atomic64_set_release 273#ifndef atomic_fetch_dec
219#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) 274#define atomic_fetch_dec(...) \
275 __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
220#endif 276#endif
277#endif /* atomic_fetch_dec_relaxed */
221 278
222/* atomic64_add_return_relaxed */ 279/* atomic_fetch_or_relaxed */
223#ifndef atomic64_add_return_relaxed 280#ifndef atomic_fetch_or_relaxed
224#define atomic64_add_return_relaxed atomic64_add_return 281#define atomic_fetch_or_relaxed atomic_fetch_or
225#define atomic64_add_return_acquire atomic64_add_return 282#define atomic_fetch_or_acquire atomic_fetch_or
226#define atomic64_add_return_release atomic64_add_return 283#define atomic_fetch_or_release atomic_fetch_or
227 284
228#else /* atomic64_add_return_relaxed */ 285#else /* atomic_fetch_or_relaxed */
229 286
230#ifndef atomic64_add_return_acquire 287#ifndef atomic_fetch_or_acquire
231#define atomic64_add_return_acquire(...) \ 288#define atomic_fetch_or_acquire(...) \
232 __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) 289 __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
233#endif 290#endif
234 291
235#ifndef atomic64_add_return_release 292#ifndef atomic_fetch_or_release
236#define atomic64_add_return_release(...) \ 293#define atomic_fetch_or_release(...) \
237 __atomic_op_release(atomic64_add_return, __VA_ARGS__) 294 __atomic_op_release(atomic_fetch_or, __VA_ARGS__)
238#endif 295#endif
239 296
240#ifndef atomic64_add_return 297#ifndef atomic_fetch_or
241#define atomic64_add_return(...) \ 298#define atomic_fetch_or(...) \
242 __atomic_op_fence(atomic64_add_return, __VA_ARGS__) 299 __atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
243#endif 300#endif
244#endif /* atomic64_add_return_relaxed */ 301#endif /* atomic_fetch_or_relaxed */
245 302
246/* atomic64_inc_return_relaxed */ 303/* atomic_fetch_and_relaxed */
247#ifndef atomic64_inc_return_relaxed 304#ifndef atomic_fetch_and_relaxed
248#define atomic64_inc_return_relaxed atomic64_inc_return 305#define atomic_fetch_and_relaxed atomic_fetch_and
249#define atomic64_inc_return_acquire atomic64_inc_return 306#define atomic_fetch_and_acquire atomic_fetch_and
250#define atomic64_inc_return_release atomic64_inc_return 307#define atomic_fetch_and_release atomic_fetch_and
251 308
252#else /* atomic64_inc_return_relaxed */ 309#else /* atomic_fetch_and_relaxed */
253 310
254#ifndef atomic64_inc_return_acquire 311#ifndef atomic_fetch_and_acquire
255#define atomic64_inc_return_acquire(...) \ 312#define atomic_fetch_and_acquire(...) \
256 __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) 313 __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
257#endif 314#endif
258 315
259#ifndef atomic64_inc_return_release 316#ifndef atomic_fetch_and_release
260#define atomic64_inc_return_release(...) \ 317#define atomic_fetch_and_release(...) \
261 __atomic_op_release(atomic64_inc_return, __VA_ARGS__) 318 __atomic_op_release(atomic_fetch_and, __VA_ARGS__)
262#endif 319#endif
263 320
264#ifndef atomic64_inc_return 321#ifndef atomic_fetch_and
265#define atomic64_inc_return(...) \ 322#define atomic_fetch_and(...) \
266 __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) 323 __atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
267#endif 324#endif
268#endif /* atomic64_inc_return_relaxed */ 325#endif /* atomic_fetch_and_relaxed */
269
270 326
271/* atomic64_sub_return_relaxed */ 327#ifdef atomic_andnot
272#ifndef atomic64_sub_return_relaxed 328/* atomic_fetch_andnot_relaxed */
273#define atomic64_sub_return_relaxed atomic64_sub_return 329#ifndef atomic_fetch_andnot_relaxed
274#define atomic64_sub_return_acquire atomic64_sub_return 330#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
275#define atomic64_sub_return_release atomic64_sub_return 331#define atomic_fetch_andnot_acquire atomic_fetch_andnot
332#define atomic_fetch_andnot_release atomic_fetch_andnot
276 333
277#else /* atomic64_sub_return_relaxed */ 334#else /* atomic_fetch_andnot_relaxed */
278 335
279#ifndef atomic64_sub_return_acquire 336#ifndef atomic_fetch_andnot_acquire
280#define atomic64_sub_return_acquire(...) \ 337#define atomic_fetch_andnot_acquire(...) \
281 __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) 338 __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
282#endif 339#endif
283 340
284#ifndef atomic64_sub_return_release 341#ifndef atomic_fetch_andnot_release
285#define atomic64_sub_return_release(...) \ 342#define atomic_fetch_andnot_release(...) \
286 __atomic_op_release(atomic64_sub_return, __VA_ARGS__) 343 __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
287#endif 344#endif
288 345
289#ifndef atomic64_sub_return 346#ifndef atomic_fetch_andnot
290#define atomic64_sub_return(...) \ 347#define atomic_fetch_andnot(...) \
291 __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) 348 __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
292#endif 349#endif
293#endif /* atomic64_sub_return_relaxed */ 350#endif /* atomic_fetch_andnot_relaxed */
351#endif /* atomic_andnot */
294 352
295/* atomic64_dec_return_relaxed */ 353/* atomic_fetch_xor_relaxed */
296#ifndef atomic64_dec_return_relaxed 354#ifndef atomic_fetch_xor_relaxed
297#define atomic64_dec_return_relaxed atomic64_dec_return 355#define atomic_fetch_xor_relaxed atomic_fetch_xor
298#define atomic64_dec_return_acquire atomic64_dec_return 356#define atomic_fetch_xor_acquire atomic_fetch_xor
299#define atomic64_dec_return_release atomic64_dec_return 357#define atomic_fetch_xor_release atomic_fetch_xor
300 358
301#else /* atomic64_dec_return_relaxed */ 359#else /* atomic_fetch_xor_relaxed */
302 360
303#ifndef atomic64_dec_return_acquire 361#ifndef atomic_fetch_xor_acquire
304#define atomic64_dec_return_acquire(...) \ 362#define atomic_fetch_xor_acquire(...) \
305 __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) 363 __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
306#endif 364#endif
307 365
308#ifndef atomic64_dec_return_release 366#ifndef atomic_fetch_xor_release
309#define atomic64_dec_return_release(...) \ 367#define atomic_fetch_xor_release(...) \
310 __atomic_op_release(atomic64_dec_return, __VA_ARGS__) 368 __atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
311#endif 369#endif
312 370
313#ifndef atomic64_dec_return 371#ifndef atomic_fetch_xor
314#define atomic64_dec_return(...) \ 372#define atomic_fetch_xor(...) \
315 __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) 373 __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
316#endif 374#endif
317#endif /* atomic64_dec_return_relaxed */ 375#endif /* atomic_fetch_xor_relaxed */
318 376
319/* atomic64_xchg_relaxed */
320#ifndef atomic64_xchg_relaxed
321#define atomic64_xchg_relaxed atomic64_xchg
322#define atomic64_xchg_acquire atomic64_xchg
323#define atomic64_xchg_release atomic64_xchg
324 377
325#else /* atomic64_xchg_relaxed */ 378/* atomic_xchg_relaxed */
379#ifndef atomic_xchg_relaxed
380#define atomic_xchg_relaxed atomic_xchg
381#define atomic_xchg_acquire atomic_xchg
382#define atomic_xchg_release atomic_xchg
326 383
327#ifndef atomic64_xchg_acquire 384#else /* atomic_xchg_relaxed */
328#define atomic64_xchg_acquire(...) \ 385
329 __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) 386#ifndef atomic_xchg_acquire
387#define atomic_xchg_acquire(...) \
388 __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
330#endif 389#endif
331 390
332#ifndef atomic64_xchg_release 391#ifndef atomic_xchg_release
333#define atomic64_xchg_release(...) \ 392#define atomic_xchg_release(...) \
334 __atomic_op_release(atomic64_xchg, __VA_ARGS__) 393 __atomic_op_release(atomic_xchg, __VA_ARGS__)
335#endif 394#endif
336 395
337#ifndef atomic64_xchg 396#ifndef atomic_xchg
338#define atomic64_xchg(...) \ 397#define atomic_xchg(...) \
339 __atomic_op_fence(atomic64_xchg, __VA_ARGS__) 398 __atomic_op_fence(atomic_xchg, __VA_ARGS__)
340#endif 399#endif
341#endif /* atomic64_xchg_relaxed */ 400#endif /* atomic_xchg_relaxed */
342 401
343/* atomic64_cmpxchg_relaxed */ 402/* atomic_cmpxchg_relaxed */
344#ifndef atomic64_cmpxchg_relaxed 403#ifndef atomic_cmpxchg_relaxed
345#define atomic64_cmpxchg_relaxed atomic64_cmpxchg 404#define atomic_cmpxchg_relaxed atomic_cmpxchg
346#define atomic64_cmpxchg_acquire atomic64_cmpxchg 405#define atomic_cmpxchg_acquire atomic_cmpxchg
347#define atomic64_cmpxchg_release atomic64_cmpxchg 406#define atomic_cmpxchg_release atomic_cmpxchg
348 407
349#else /* atomic64_cmpxchg_relaxed */ 408#else /* atomic_cmpxchg_relaxed */
350 409
351#ifndef atomic64_cmpxchg_acquire 410#ifndef atomic_cmpxchg_acquire
352#define atomic64_cmpxchg_acquire(...) \ 411#define atomic_cmpxchg_acquire(...) \
353 __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) 412 __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
354#endif 413#endif
355 414
356#ifndef atomic64_cmpxchg_release 415#ifndef atomic_cmpxchg_release
357#define atomic64_cmpxchg_release(...) \ 416#define atomic_cmpxchg_release(...) \
358 __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) 417 __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
359#endif 418#endif
360 419
361#ifndef atomic64_cmpxchg 420#ifndef atomic_cmpxchg
362#define atomic64_cmpxchg(...) \ 421#define atomic_cmpxchg(...) \
363 __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) 422 __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
364#endif 423#endif
365#endif /* atomic64_cmpxchg_relaxed */ 424#endif /* atomic_cmpxchg_relaxed */
366 425
367/* cmpxchg_relaxed */ 426/* cmpxchg_relaxed */
368#ifndef cmpxchg_relaxed 427#ifndef cmpxchg_relaxed
@@ -463,18 +522,28 @@ static inline void atomic_andnot(int i, atomic_t *v)
463{ 522{
464 atomic_and(~i, v); 523 atomic_and(~i, v);
465} 524}
466#endif
467 525
468static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) 526static inline int atomic_fetch_andnot(int i, atomic_t *v)
469{ 527{
470 atomic_andnot(mask, v); 528 return atomic_fetch_and(~i, v);
471} 529}
472 530
473static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) 531static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
474{ 532{
475 atomic_or(mask, v); 533 return atomic_fetch_and_relaxed(~i, v);
476} 534}
477 535
536static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
537{
538 return atomic_fetch_and_acquire(~i, v);
539}
540
541static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
542{
543 return atomic_fetch_and_release(~i, v);
544}
545#endif
546
478/** 547/**
479 * atomic_inc_not_zero_hint - increment if not null 548 * atomic_inc_not_zero_hint - increment if not null
480 * @v: pointer of type atomic_t 549 * @v: pointer of type atomic_t
@@ -558,36 +627,400 @@ static inline int atomic_dec_if_positive(atomic_t *v)
558} 627}
559#endif 628#endif
560 629
561/** 630#ifdef CONFIG_GENERIC_ATOMIC64
562 * atomic_fetch_or - perform *p |= mask and return old value of *p 631#include <asm-generic/atomic64.h>
563 * @mask: mask to OR on the atomic_t 632#endif
564 * @p: pointer to atomic_t
565 */
566#ifndef atomic_fetch_or
567static inline int atomic_fetch_or(int mask, atomic_t *p)
568{
569 int old, val = atomic_read(p);
570 633
571 for (;;) { 634#ifndef atomic64_read_acquire
572 old = atomic_cmpxchg(p, val, val | mask); 635#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
573 if (old == val) 636#endif
574 break;
575 val = old;
576 }
577 637
578 return old; 638#ifndef atomic64_set_release
579} 639#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
580#endif 640#endif
581 641
582#ifdef CONFIG_GENERIC_ATOMIC64 642/* atomic64_add_return_relaxed */
583#include <asm-generic/atomic64.h> 643#ifndef atomic64_add_return_relaxed
644#define atomic64_add_return_relaxed atomic64_add_return
645#define atomic64_add_return_acquire atomic64_add_return
646#define atomic64_add_return_release atomic64_add_return
647
648#else /* atomic64_add_return_relaxed */
649
650#ifndef atomic64_add_return_acquire
651#define atomic64_add_return_acquire(...) \
652 __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
653#endif
654
655#ifndef atomic64_add_return_release
656#define atomic64_add_return_release(...) \
657 __atomic_op_release(atomic64_add_return, __VA_ARGS__)
658#endif
659
660#ifndef atomic64_add_return
661#define atomic64_add_return(...) \
662 __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
663#endif
664#endif /* atomic64_add_return_relaxed */
665
666/* atomic64_inc_return_relaxed */
667#ifndef atomic64_inc_return_relaxed
668#define atomic64_inc_return_relaxed atomic64_inc_return
669#define atomic64_inc_return_acquire atomic64_inc_return
670#define atomic64_inc_return_release atomic64_inc_return
671
672#else /* atomic64_inc_return_relaxed */
673
674#ifndef atomic64_inc_return_acquire
675#define atomic64_inc_return_acquire(...) \
676 __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
677#endif
678
679#ifndef atomic64_inc_return_release
680#define atomic64_inc_return_release(...) \
681 __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
682#endif
683
684#ifndef atomic64_inc_return
685#define atomic64_inc_return(...) \
686 __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
687#endif
688#endif /* atomic64_inc_return_relaxed */
689
690
691/* atomic64_sub_return_relaxed */
692#ifndef atomic64_sub_return_relaxed
693#define atomic64_sub_return_relaxed atomic64_sub_return
694#define atomic64_sub_return_acquire atomic64_sub_return
695#define atomic64_sub_return_release atomic64_sub_return
696
697#else /* atomic64_sub_return_relaxed */
698
699#ifndef atomic64_sub_return_acquire
700#define atomic64_sub_return_acquire(...) \
701 __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
584#endif 702#endif
585 703
704#ifndef atomic64_sub_return_release
705#define atomic64_sub_return_release(...) \
706 __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
707#endif
708
709#ifndef atomic64_sub_return
710#define atomic64_sub_return(...) \
711 __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
712#endif
713#endif /* atomic64_sub_return_relaxed */
714
715/* atomic64_dec_return_relaxed */
716#ifndef atomic64_dec_return_relaxed
717#define atomic64_dec_return_relaxed atomic64_dec_return
718#define atomic64_dec_return_acquire atomic64_dec_return
719#define atomic64_dec_return_release atomic64_dec_return
720
721#else /* atomic64_dec_return_relaxed */
722
723#ifndef atomic64_dec_return_acquire
724#define atomic64_dec_return_acquire(...) \
725 __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
726#endif
727
728#ifndef atomic64_dec_return_release
729#define atomic64_dec_return_release(...) \
730 __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
731#endif
732
733#ifndef atomic64_dec_return
734#define atomic64_dec_return(...) \
735 __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
736#endif
737#endif /* atomic64_dec_return_relaxed */
738
739
740/* atomic64_fetch_add_relaxed */
741#ifndef atomic64_fetch_add_relaxed
742#define atomic64_fetch_add_relaxed atomic64_fetch_add
743#define atomic64_fetch_add_acquire atomic64_fetch_add
744#define atomic64_fetch_add_release atomic64_fetch_add
745
746#else /* atomic64_fetch_add_relaxed */
747
748#ifndef atomic64_fetch_add_acquire
749#define atomic64_fetch_add_acquire(...) \
750 __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
751#endif
752
753#ifndef atomic64_fetch_add_release
754#define atomic64_fetch_add_release(...) \
755 __atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
756#endif
757
758#ifndef atomic64_fetch_add
759#define atomic64_fetch_add(...) \
760 __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
761#endif
762#endif /* atomic64_fetch_add_relaxed */
763
764/* atomic64_fetch_inc_relaxed */
765#ifndef atomic64_fetch_inc_relaxed
766
767#ifndef atomic64_fetch_inc
768#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v))
769#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v))
770#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v))
771#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v))
772#else /* atomic64_fetch_inc */
773#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
774#define atomic64_fetch_inc_acquire atomic64_fetch_inc
775#define atomic64_fetch_inc_release atomic64_fetch_inc
776#endif /* atomic64_fetch_inc */
777
778#else /* atomic64_fetch_inc_relaxed */
779
780#ifndef atomic64_fetch_inc_acquire
781#define atomic64_fetch_inc_acquire(...) \
782 __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
783#endif
784
785#ifndef atomic64_fetch_inc_release
786#define atomic64_fetch_inc_release(...) \
787 __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
788#endif
789
790#ifndef atomic64_fetch_inc
791#define atomic64_fetch_inc(...) \
792 __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
793#endif
794#endif /* atomic64_fetch_inc_relaxed */
795
796/* atomic64_fetch_sub_relaxed */
797#ifndef atomic64_fetch_sub_relaxed
798#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
799#define atomic64_fetch_sub_acquire atomic64_fetch_sub
800#define atomic64_fetch_sub_release atomic64_fetch_sub
801
802#else /* atomic64_fetch_sub_relaxed */
803
804#ifndef atomic64_fetch_sub_acquire
805#define atomic64_fetch_sub_acquire(...) \
806 __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
807#endif
808
809#ifndef atomic64_fetch_sub_release
810#define atomic64_fetch_sub_release(...) \
811 __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
812#endif
813
814#ifndef atomic64_fetch_sub
815#define atomic64_fetch_sub(...) \
816 __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
817#endif
818#endif /* atomic64_fetch_sub_relaxed */
819
820/* atomic64_fetch_dec_relaxed */
821#ifndef atomic64_fetch_dec_relaxed
822
823#ifndef atomic64_fetch_dec
824#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v))
825#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v))
826#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v))
827#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v))
828#else /* atomic64_fetch_dec */
829#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
830#define atomic64_fetch_dec_acquire atomic64_fetch_dec
831#define atomic64_fetch_dec_release atomic64_fetch_dec
832#endif /* atomic64_fetch_dec */
833
834#else /* atomic64_fetch_dec_relaxed */
835
836#ifndef atomic64_fetch_dec_acquire
837#define atomic64_fetch_dec_acquire(...) \
838 __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
839#endif
840
841#ifndef atomic64_fetch_dec_release
842#define atomic64_fetch_dec_release(...) \
843 __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
844#endif
845
846#ifndef atomic64_fetch_dec
847#define atomic64_fetch_dec(...) \
848 __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
849#endif
850#endif /* atomic64_fetch_dec_relaxed */
851
852/* atomic64_fetch_or_relaxed */
853#ifndef atomic64_fetch_or_relaxed
854#define atomic64_fetch_or_relaxed atomic64_fetch_or
855#define atomic64_fetch_or_acquire atomic64_fetch_or
856#define atomic64_fetch_or_release atomic64_fetch_or
857
858#else /* atomic64_fetch_or_relaxed */
859
860#ifndef atomic64_fetch_or_acquire
861#define atomic64_fetch_or_acquire(...) \
862 __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
863#endif
864
865#ifndef atomic64_fetch_or_release
866#define atomic64_fetch_or_release(...) \
867 __atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
868#endif
869
870#ifndef atomic64_fetch_or
871#define atomic64_fetch_or(...) \
872 __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
873#endif
874#endif /* atomic64_fetch_or_relaxed */
875
876/* atomic64_fetch_and_relaxed */
877#ifndef atomic64_fetch_and_relaxed
878#define atomic64_fetch_and_relaxed atomic64_fetch_and
879#define atomic64_fetch_and_acquire atomic64_fetch_and
880#define atomic64_fetch_and_release atomic64_fetch_and
881
882#else /* atomic64_fetch_and_relaxed */
883
884#ifndef atomic64_fetch_and_acquire
885#define atomic64_fetch_and_acquire(...) \
886 __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
887#endif
888
889#ifndef atomic64_fetch_and_release
890#define atomic64_fetch_and_release(...) \
891 __atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
892#endif
893
894#ifndef atomic64_fetch_and
895#define atomic64_fetch_and(...) \
896 __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
897#endif
898#endif /* atomic64_fetch_and_relaxed */
899
900#ifdef atomic64_andnot
901/* atomic64_fetch_andnot_relaxed */
902#ifndef atomic64_fetch_andnot_relaxed
903#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
904#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
905#define atomic64_fetch_andnot_release atomic64_fetch_andnot
906
907#else /* atomic64_fetch_andnot_relaxed */
908
909#ifndef atomic64_fetch_andnot_acquire
910#define atomic64_fetch_andnot_acquire(...) \
911 __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
912#endif
913
914#ifndef atomic64_fetch_andnot_release
915#define atomic64_fetch_andnot_release(...) \
916 __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
917#endif
918
919#ifndef atomic64_fetch_andnot
920#define atomic64_fetch_andnot(...) \
921 __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
922#endif
923#endif /* atomic64_fetch_andnot_relaxed */
924#endif /* atomic64_andnot */
925
926/* atomic64_fetch_xor_relaxed */
927#ifndef atomic64_fetch_xor_relaxed
928#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
929#define atomic64_fetch_xor_acquire atomic64_fetch_xor
930#define atomic64_fetch_xor_release atomic64_fetch_xor
931
932#else /* atomic64_fetch_xor_relaxed */
933
934#ifndef atomic64_fetch_xor_acquire
935#define atomic64_fetch_xor_acquire(...) \
936 __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
937#endif
938
939#ifndef atomic64_fetch_xor_release
940#define atomic64_fetch_xor_release(...) \
941 __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
942#endif
943
944#ifndef atomic64_fetch_xor
945#define atomic64_fetch_xor(...) \
946 __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
947#endif
948#endif /* atomic64_fetch_xor_relaxed */
949
950
951/* atomic64_xchg_relaxed */
952#ifndef atomic64_xchg_relaxed
953#define atomic64_xchg_relaxed atomic64_xchg
954#define atomic64_xchg_acquire atomic64_xchg
955#define atomic64_xchg_release atomic64_xchg
956
957#else /* atomic64_xchg_relaxed */
958
959#ifndef atomic64_xchg_acquire
960#define atomic64_xchg_acquire(...) \
961 __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
962#endif
963
964#ifndef atomic64_xchg_release
965#define atomic64_xchg_release(...) \
966 __atomic_op_release(atomic64_xchg, __VA_ARGS__)
967#endif
968
969#ifndef atomic64_xchg
970#define atomic64_xchg(...) \
971 __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
972#endif
973#endif /* atomic64_xchg_relaxed */
974
975/* atomic64_cmpxchg_relaxed */
976#ifndef atomic64_cmpxchg_relaxed
977#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
978#define atomic64_cmpxchg_acquire atomic64_cmpxchg
979#define atomic64_cmpxchg_release atomic64_cmpxchg
980
981#else /* atomic64_cmpxchg_relaxed */
982
983#ifndef atomic64_cmpxchg_acquire
984#define atomic64_cmpxchg_acquire(...) \
985 __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
986#endif
987
988#ifndef atomic64_cmpxchg_release
989#define atomic64_cmpxchg_release(...) \
990 __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
991#endif
992
993#ifndef atomic64_cmpxchg
994#define atomic64_cmpxchg(...) \
995 __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
996#endif
997#endif /* atomic64_cmpxchg_relaxed */
998
586#ifndef atomic64_andnot 999#ifndef atomic64_andnot
587static inline void atomic64_andnot(long long i, atomic64_t *v) 1000static inline void atomic64_andnot(long long i, atomic64_t *v)
588{ 1001{
589 atomic64_and(~i, v); 1002 atomic64_and(~i, v);
590} 1003}
1004
1005static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
1006{
1007 return atomic64_fetch_and(~i, v);
1008}
1009
1010static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
1011{
1012 return atomic64_fetch_and_relaxed(~i, v);
1013}
1014
1015static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
1016{
1017 return atomic64_fetch_and_acquire(~i, v);
1018}
1019
1020static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
1021{
1022 return atomic64_fetch_and_release(~i, v);
1023}
591#endif 1024#endif
592 1025
593#include <asm-generic/atomic-long.h> 1026#include <asm-generic/atomic-long.h>
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 961a417d641e..e38e3fc13ea8 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -26,7 +26,6 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <uapi/linux/audit.h> 28#include <uapi/linux/audit.h>
29#include <linux/tty.h>
30 29
31#define AUDIT_INO_UNSET ((unsigned long)-1) 30#define AUDIT_INO_UNSET ((unsigned long)-1)
32#define AUDIT_DEV_UNSET ((dev_t)-1) 31#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -348,23 +347,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
348 return tsk->sessionid; 347 return tsk->sessionid;
349} 348}
350 349
351static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
352{
353 struct tty_struct *tty = NULL;
354 unsigned long flags;
355
356 spin_lock_irqsave(&tsk->sighand->siglock, flags);
357 if (tsk->signal)
358 tty = tty_kref_get(tsk->signal->tty);
359 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
360 return tty;
361}
362
363static inline void audit_put_tty(struct tty_struct *tty)
364{
365 tty_kref_put(tty);
366}
367
368extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); 350extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
369extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); 351extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
370extern void __audit_bprm(struct linux_binprm *bprm); 352extern void __audit_bprm(struct linux_binprm *bprm);
@@ -522,12 +504,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
522{ 504{
523 return -1; 505 return -1;
524} 506}
525static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
526{
527 return NULL;
528}
529static inline void audit_put_tty(struct tty_struct *tty)
530{ }
531static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) 507static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
532{ } 508{ }
533static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, 509static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index e6b41f42602b..3db25df396cb 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -159,6 +159,7 @@ struct bcma_host_ops {
159#define BCMA_CORE_DEFAULT 0xFFF 159#define BCMA_CORE_DEFAULT 0xFFF
160 160
161#define BCMA_MAX_NR_CORES 16 161#define BCMA_MAX_NR_CORES 16
162#define BCMA_CORE_SIZE 0x1000
162 163
163/* Chip IDs of PCIe devices */ 164/* Chip IDs of PCIe devices */
164#define BCMA_CHIP_ID_BCM4313 0x4313 165#define BCMA_CHIP_ID_BCM4313 0x4313
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 576e4639ca60..314b3caa701c 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -65,6 +65,7 @@ struct coredump_params {
65 unsigned long limit; 65 unsigned long limit;
66 unsigned long mm_flags; 66 unsigned long mm_flags;
67 loff_t written; 67 loff_t written;
68 loff_t pos;
68}; 69};
69 70
70/* 71/*
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e9b0b9ab07e5..27bfc0b631a9 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -267,6 +267,10 @@ static inline int bitmap_equal(const unsigned long *src1,
267{ 267{
268 if (small_const_nbits(nbits)) 268 if (small_const_nbits(nbits))
269 return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); 269 return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
270#ifdef CONFIG_S390
271 else if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0)
272 return !memcmp(src1, src2, nbits / 8);
273#endif
270 else 274 else
271 return __bitmap_equal(src1, src2, nbits); 275 return __bitmap_equal(src1, src2, nbits);
272} 276}
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8ee27b8afe81..0de4de6dd43e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -111,6 +111,31 @@ enum bpf_access_type {
111 BPF_WRITE = 2 111 BPF_WRITE = 2
112}; 112};
113 113
114/* types of values stored in eBPF registers */
115enum bpf_reg_type {
116 NOT_INIT = 0, /* nothing was written into register */
117 UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
118 PTR_TO_CTX, /* reg points to bpf_context */
119 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
120 PTR_TO_MAP_VALUE, /* reg points to map element value */
121 PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
122 FRAME_PTR, /* reg == frame_pointer */
123 PTR_TO_STACK, /* reg == frame_pointer + imm */
124 CONST_IMM, /* constant integer value */
125
126 /* PTR_TO_PACKET represents:
127 * skb->data
128 * skb->data + imm
129 * skb->data + (u16) var
130 * skb->data + (u16) var + imm
131 * if (range > 0) then [ptr, ptr + range - off) is safe to access
132 * if (id > 0) means that some 'var' was added
 133 * if (off > 0) means that 'imm' was added
134 */
135 PTR_TO_PACKET,
136 PTR_TO_PACKET_END, /* skb->data + headlen */
137};
138
114struct bpf_prog; 139struct bpf_prog;
115 140
116struct bpf_verifier_ops { 141struct bpf_verifier_ops {
@@ -120,7 +145,8 @@ struct bpf_verifier_ops {
120 /* return true if 'size' wide access at offset 'off' within bpf_context 145 /* return true if 'size' wide access at offset 'off' within bpf_context
121 * with 'type' (read or write) is allowed 146 * with 'type' (read or write) is allowed
122 */ 147 */
123 bool (*is_valid_access)(int off, int size, enum bpf_access_type type); 148 bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
149 enum bpf_reg_type *reg_type);
124 150
125 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, 151 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
126 int src_reg, int ctx_off, 152 int src_reg, int ctx_off,
@@ -238,6 +264,10 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
238static inline void bpf_prog_put(struct bpf_prog *prog) 264static inline void bpf_prog_put(struct bpf_prog *prog)
239{ 265{
240} 266}
267
268static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
269{
270}
241#endif /* CONFIG_BPF_SYSCALL */ 271#endif /* CONFIG_BPF_SYSCALL */
242 272
243/* verifier prototypes for helper functions called from eBPF programs */ 273/* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d48daa3f6f20..7e14e545c4b6 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -208,6 +208,9 @@ void block_invalidatepage(struct page *page, unsigned int offset,
208 unsigned int length); 208 unsigned int length);
209int block_write_full_page(struct page *page, get_block_t *get_block, 209int block_write_full_page(struct page *page, get_block_t *get_block,
210 struct writeback_control *wbc); 210 struct writeback_control *wbc);
211int __block_write_full_page(struct inode *inode, struct page *page,
212 get_block_t *get_block, struct writeback_control *wbc,
213 bh_end_io_t *handler);
211int block_read_full_page(struct page*, get_block_t*); 214int block_read_full_page(struct page*, get_block_t*);
212int block_is_partially_uptodate(struct page *page, unsigned long from, 215int block_is_partially_uptodate(struct page *page, unsigned long from,
213 unsigned long count); 216 unsigned long count);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 19b14862d3e0..1b3b6e155392 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -279,6 +279,11 @@ struct ceph_osd_client {
279 struct workqueue_struct *notify_wq; 279 struct workqueue_struct *notify_wq;
280}; 280};
281 281
282static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
283{
284 return osdc->osdmap->flags & flag;
285}
286
282extern int ceph_osdc_setup(void); 287extern int ceph_osdc_setup(void);
283extern void ceph_osdc_cleanup(void); 288extern void ceph_osdc_cleanup(void);
284 289
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index ddc426b22d81..9ccf4dbe55f8 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
189 return !ceph_osd_is_up(map, osd); 189 return !ceph_osd_is_up(map, osd);
190} 190}
191 191
192static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
193{
194 return map && (map->flags & flag);
195}
196
197extern char *ceph_osdmap_state_str(char *str, int len, int state); 192extern char *ceph_osdmap_state_str(char *str, int len, int state);
198extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); 193extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
199 194
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0c72204c75fc..fb39d5add173 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -25,7 +25,7 @@
25#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ 25#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
26#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ 26#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
27#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ 27#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
28#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */ 28 /* unused */
29#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ 29#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
30#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ 30#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
31#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ 31#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0df4a51e1a78..834179f3fa72 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -461,6 +461,10 @@ static inline struct clk *clk_get_parent(struct clk *clk)
461 return NULL; 461 return NULL;
462} 462}
463 463
464static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
465{
466 return NULL;
467}
464#endif 468#endif
465 469
466/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ 470/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 44a1aff22566..08398182f56e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *,
244extern int clocksource_i8253_init(void); 244extern int clocksource_i8253_init(void);
245 245
246#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ 246#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
247 OF_DECLARE_1(clksrc, name, compat, fn) 247 OF_DECLARE_1_RET(clksrc, name, compat, fn)
248 248
249#ifdef CONFIG_CLKSRC_PROBE 249#ifdef CONFIG_CLKSRC_PROBE
250extern void clocksource_probe(void); 250extern void clocksource_probe(void);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 793c0829e3a3..2e853b679a5d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -304,23 +304,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
304 __u.__val; \ 304 __u.__val; \
305}) 305})
306 306
307/**
308 * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
309 * @cond: boolean expression to wait for
310 *
311 * Equivalent to using smp_load_acquire() on the condition variable but employs
312 * the control dependency of the wait to reduce the barrier on many platforms.
313 *
314 * The control dependency provides a LOAD->STORE order, the additional RMB
315 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
316 * aka. ACQUIRE.
317 */
318#define smp_cond_acquire(cond) do { \
319 while (!(cond)) \
320 cpu_relax(); \
321 smp_rmb(); /* ctrl + rmb := acquire */ \
322} while (0)
323
324#endif /* __KERNEL__ */ 307#endif /* __KERNEL__ */
325 308
326#endif /* __ASSEMBLY__ */ 309#endif /* __ASSEMBLY__ */
@@ -545,10 +528,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
545 * Similar to rcu_dereference(), but for situations where the pointed-to 528 * Similar to rcu_dereference(), but for situations where the pointed-to
546 * object's lifetime is managed by something other than RCU. That 529 * object's lifetime is managed by something other than RCU. That
547 * "something other" might be reference counting or simple immortality. 530 * "something other" might be reference counting or simple immortality.
531 *
532 * The seemingly unused void * variable is to validate @p is indeed a pointer
533 * type. All pointer types silently cast to void *.
548 */ 534 */
549#define lockless_dereference(p) \ 535#define lockless_dereference(p) \
550({ \ 536({ \
551 typeof(p) _________p1 = READ_ONCE(p); \ 537 typeof(p) _________p1 = READ_ONCE(p); \
538 __maybe_unused const void * const _________p2 = _________p1; \
552 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ 539 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
553 (_________p1); \ 540 (_________p1); \
554}) 541})
diff --git a/include/linux/console.h b/include/linux/console.h
index 98c8615dc300..d530c4627e54 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -28,6 +28,13 @@ struct tty_struct;
28#define VT100ID "\033[?1;2c" 28#define VT100ID "\033[?1;2c"
29#define VT102ID "\033[?6c" 29#define VT102ID "\033[?6c"
30 30
31/**
32 * struct consw - callbacks for consoles
33 *
34 * @con_set_palette: sets the palette of the console to @table (optional)
35 * @con_scrolldelta: the contents of the console should be scrolled by @lines.
36 * Invoked by user. (optional)
37 */
31struct consw { 38struct consw {
32 struct module *owner; 39 struct module *owner;
33 const char *(*con_startup)(void); 40 const char *(*con_startup)(void);
@@ -38,7 +45,6 @@ struct consw {
38 void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); 45 void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
39 void (*con_cursor)(struct vc_data *, int); 46 void (*con_cursor)(struct vc_data *, int);
40 int (*con_scroll)(struct vc_data *, int, int, int, int); 47 int (*con_scroll)(struct vc_data *, int, int, int, int);
41 void (*con_bmove)(struct vc_data *, int, int, int, int, int, int);
42 int (*con_switch)(struct vc_data *); 48 int (*con_switch)(struct vc_data *);
43 int (*con_blank)(struct vc_data *, int, int); 49 int (*con_blank)(struct vc_data *, int, int);
44 int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); 50 int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
@@ -47,8 +53,9 @@ struct consw {
47 int (*con_font_copy)(struct vc_data *, int); 53 int (*con_font_copy)(struct vc_data *, int);
48 int (*con_resize)(struct vc_data *, unsigned int, unsigned int, 54 int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
49 unsigned int); 55 unsigned int);
50 int (*con_set_palette)(struct vc_data *, const unsigned char *); 56 void (*con_set_palette)(struct vc_data *,
51 int (*con_scrolldelta)(struct vc_data *, int); 57 const unsigned char *table);
58 void (*con_scrolldelta)(struct vc_data *, int lines);
52 int (*con_set_origin)(struct vc_data *); 59 int (*con_set_origin)(struct vc_data *);
53 void (*con_save_screen)(struct vc_data *); 60 void (*con_save_screen)(struct vc_data *);
54 u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); 61 u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index e329ee2667e1..6fd3c908a340 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -21,6 +21,38 @@ struct uni_pagedir;
21 21
22#define NPAR 16 22#define NPAR 16
23 23
24/*
25 * Example: vc_data of a console that was scrolled 3 lines down.
26 *
27 * Console buffer
28 * vc_screenbuf ---------> +----------------------+-.
29 * | initializing W | \
30 * | initializing X | |
31 * | initializing Y | > scroll-back area
32 * | initializing Z | |
33 * | | /
34 * vc_visible_origin ---> ^+----------------------+-:
35 * (changes by scroll) || Welcome to linux | \
36 * || | |
37 * vc_rows --->< | login: root | | visible on console
38 * || password: | > (vc_screenbuf_size is
39 * vc_origin -----------> || | | vc_size_row * vc_rows)
40 * (start when no scroll) || Last login: 12:28 | /
41 * v+----------------------+-:
42 * | Have a lot of fun... | \
43 * vc_pos -----------------|--------v | > scroll-front area
44 * | ~ # cat_ | /
45 * vc_scr_end -----------> +----------------------+-:
46 * (vc_origin + | | \ EMPTY, to be filled by
47 * vc_screenbuf_size) | | / vc_video_erase_char
48 * +----------------------+-'
49 * <---- 2 * vc_cols ----->
50 * <---- vc_size_row ----->
51 *
52 * Note that every character in the console buffer is accompanied with an
53 * attribute in the buffer right after the character. This is not depicted
54 * in the figure.
55 */
24struct vc_data { 56struct vc_data {
25 struct tty_port port; /* Upper level data */ 57 struct tty_port port; /* Upper level data */
26 58
@@ -74,7 +106,6 @@ struct vc_data {
74 unsigned int vc_decawm : 1; /* Autowrap Mode */ 106 unsigned int vc_decawm : 1; /* Autowrap Mode */
75 unsigned int vc_deccm : 1; /* Cursor Visible */ 107 unsigned int vc_deccm : 1; /* Cursor Visible */
76 unsigned int vc_decim : 1; /* Insert Mode */ 108 unsigned int vc_decim : 1; /* Insert Mode */
77 unsigned int vc_deccolm : 1; /* 80/132 Column Mode */
78 /* attribute flags */ 109 /* attribute flags */
79 unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ 110 unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
80 unsigned int vc_italic:1; 111 unsigned int vc_italic:1;
@@ -136,6 +167,9 @@ extern void vc_SAK(struct work_struct *work);
136 167
137#define CUR_DEFAULT CUR_UNDERLINE 168#define CUR_DEFAULT CUR_UNDERLINE
138 169
139#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp) 170static inline bool con_is_visible(const struct vc_data *vc)
171{
172 return *vc->vc_display_fg == vc;
173}
140 174
141#endif /* _LINUX_CONSOLE_STRUCT_H */ 175#endif /* _LINUX_CONSOLE_STRUCT_H */
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d259274238db..d9aef2a0ec8e 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -31,6 +31,19 @@ static inline void user_exit(void)
31 context_tracking_exit(CONTEXT_USER); 31 context_tracking_exit(CONTEXT_USER);
32} 32}
33 33
34/* Called with interrupts disabled. */
35static inline void user_enter_irqoff(void)
36{
37 if (context_tracking_is_enabled())
38 __context_tracking_enter(CONTEXT_USER);
39
40}
41static inline void user_exit_irqoff(void)
42{
43 if (context_tracking_is_enabled())
44 __context_tracking_exit(CONTEXT_USER);
45}
46
34static inline enum ctx_state exception_enter(void) 47static inline enum ctx_state exception_enter(void)
35{ 48{
36 enum ctx_state prev_ctx; 49 enum ctx_state prev_ctx;
@@ -69,6 +82,8 @@ static inline enum ctx_state ct_state(void)
69#else 82#else
70static inline void user_enter(void) { } 83static inline void user_enter(void) { }
71static inline void user_exit(void) { } 84static inline void user_exit(void) { }
85static inline void user_enter_irqoff(void) { }
86static inline void user_exit_irqoff(void) { }
72static inline enum ctx_state exception_enter(void) { return 0; } 87static inline enum ctx_state exception_enter(void) { return 0; }
73static inline void exception_exit(enum ctx_state prev_ctx) { } 88static inline void exception_exit(enum ctx_state prev_ctx) { }
74static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } 89static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 786ad32631a6..07b83d32f66c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
152extern int cpuidle_play_dead(void); 152extern int cpuidle_play_dead(void);
153 153
154extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); 154extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
155static inline struct cpuidle_device *cpuidle_get_device(void)
156{return __this_cpu_read(cpuidle_devices); }
155#else 157#else
156static inline void disable_cpuidle(void) { } 158static inline void disable_cpuidle(void) { }
157static inline bool cpuidle_not_available(struct cpuidle_driver *drv, 159static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
187static inline int cpuidle_play_dead(void) {return -ENODEV; } 189static inline int cpuidle_play_dead(void) {return -ENODEV; }
188static inline struct cpuidle_driver *cpuidle_get_cpu_driver( 190static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
189 struct cpuidle_device *dev) {return NULL; } 191 struct cpuidle_device *dev) {return NULL; }
192static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
190#endif 193#endif
191 194
192#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) 195#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 484c8792da82..f53fa055021a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -212,6 +212,7 @@ struct dentry_operations {
212#define DCACHE_OP_REAL 0x08000000 212#define DCACHE_OP_REAL 0x08000000
213 213
214#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ 214#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
215#define DCACHE_DENTRY_CURSOR 0x20000000
215 216
216extern seqlock_t rename_lock; 217extern seqlock_t rename_lock;
217 218
@@ -575,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
575 return inode; 576 return inode;
576} 577}
577 578
579/**
580 * d_real_inode - Return the real inode
581 * @dentry: The dentry to query
582 *
583 * If dentry is on an union/overlay, then return the underlying, real inode.
584 * Otherwise return d_inode().
585 */
586static inline struct inode *d_real_inode(struct dentry *dentry)
587{
588 return d_backing_inode(d_real(dentry));
589}
590
578 591
579#endif /* __LINUX_DCACHE_H */ 592#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 5871f292b596..277ab9af9ac2 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,13 +15,12 @@
15 15
16#include <linux/errno.h> 16#include <linux/errno.h>
17 17
18struct pts_fs_info;
19
20#ifdef CONFIG_UNIX98_PTYS 18#ifdef CONFIG_UNIX98_PTYS
21 19
22/* Look up a pts fs info and get a ref to it */ 20struct pts_fs_info;
23struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); 21
24void devpts_put_ref(struct pts_fs_info *); 22struct pts_fs_info *devpts_acquire(struct file *);
23void devpts_release(struct pts_fs_info *);
25 24
26int devpts_new_index(struct pts_fs_info *); 25int devpts_new_index(struct pts_fs_info *);
27void devpts_kill_index(struct pts_fs_info *, int); 26void devpts_kill_index(struct pts_fs_info *, int);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3fe90d494edb..4551c6f2a6c4 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -112,19 +112,24 @@ struct dma_buf_ops {
112 * @file: file pointer used for sharing buffers across, and for refcounting. 112 * @file: file pointer used for sharing buffers across, and for refcounting.
113 * @attachments: list of dma_buf_attachment that denotes all devices attached. 113 * @attachments: list of dma_buf_attachment that denotes all devices attached.
114 * @ops: dma_buf_ops associated with this buffer object. 114 * @ops: dma_buf_ops associated with this buffer object.
115 * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
116 * @vmapping_counter: used internally to refcnt the vmaps
117 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
115 * @exp_name: name of the exporter; useful for debugging. 118 * @exp_name: name of the exporter; useful for debugging.
116 * @owner: pointer to exporter module; used for refcounting when exporter is a 119 * @owner: pointer to exporter module; used for refcounting when exporter is a
117 * kernel module. 120 * kernel module.
118 * @list_node: node for dma_buf accounting and debugging. 121 * @list_node: node for dma_buf accounting and debugging.
119 * @priv: exporter specific private data for this buffer object. 122 * @priv: exporter specific private data for this buffer object.
120 * @resv: reservation object linked to this dma-buf 123 * @resv: reservation object linked to this dma-buf
124 * @poll: for userspace poll support
125 * @cb_excl: for userspace poll support
126 * @cb_shared: for userspace poll support
121 */ 127 */
122struct dma_buf { 128struct dma_buf {
123 size_t size; 129 size_t size;
124 struct file *file; 130 struct file *file;
125 struct list_head attachments; 131 struct list_head attachments;
126 const struct dma_buf_ops *ops; 132 const struct dma_buf_ops *ops;
127 /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
128 struct mutex lock; 133 struct mutex lock;
129 unsigned vmapping_counter; 134 unsigned vmapping_counter;
130 void *vmap_ptr; 135 void *vmap_ptr;
@@ -188,9 +193,11 @@ struct dma_buf_export_info {
188 193
189/** 194/**
190 * helper macro for exporters; zeros and fills in most common values 195 * helper macro for exporters; zeros and fills in most common values
196 *
197 * @name: export-info name
191 */ 198 */
192#define DEFINE_DMA_BUF_EXPORT_INFO(a) \ 199#define DEFINE_DMA_BUF_EXPORT_INFO(name) \
193 struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ 200 struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
194 .owner = THIS_MODULE } 201 .owner = THIS_MODULE }
195 202
196/** 203/**
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index 79df69dc629c..aaff68efba5d 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -39,14 +39,22 @@ struct hsu_dma_chip {
39 39
40#if IS_ENABLED(CONFIG_HSU_DMA) 40#if IS_ENABLED(CONFIG_HSU_DMA)
41/* Export to the internal users */ 41/* Export to the internal users */
42irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); 42int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
43 u32 *status);
44irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
45 u32 status);
43 46
44/* Export to the platform drivers */ 47/* Export to the platform drivers */
45int hsu_dma_probe(struct hsu_dma_chip *chip); 48int hsu_dma_probe(struct hsu_dma_chip *chip);
46int hsu_dma_remove(struct hsu_dma_chip *chip); 49int hsu_dma_remove(struct hsu_dma_chip *chip);
47#else 50#else
48static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, 51static inline int hsu_dma_get_status(struct hsu_dma_chip *chip,
49 unsigned short nr) 52 unsigned short nr, u32 *status)
53{
54 return 0;
55}
56static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip,
57 unsigned short nr, u32 status)
50{ 58{
51 return IRQ_NONE; 59 return IRQ_NONE;
52} 60}
diff --git a/include/linux/efi.h b/include/linux/efi.h
index c2db3ca22217..7f80a75ee9e3 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -536,116 +536,58 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
536void efi_native_runtime_setup(void); 536void efi_native_runtime_setup(void);
537 537
538/* 538/*
539 * EFI Configuration Table and GUID definitions 539 * EFI Configuration Table and GUID definitions
540 *
541 * These are all defined in a single line to make them easier to
542 * grep for and to see them at a glance - while still having a
543 * similar structure to the definitions in the spec.
544 *
545 * Here's how they are structured:
546 *
547 * GUID: 12345678-1234-1234-1234-123456789012
548 * Spec:
549 * #define EFI_SOME_PROTOCOL_GUID \
550 * {0x12345678,0x1234,0x1234,\
551 * {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}}
552 * Here:
553 * #define SOME_PROTOCOL_GUID EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12)
554 * ^ tabs ^extra space
555 *
556 * Note that the 'extra space' separates the values at the same place
557 * where the UEFI SPEC breaks the line.
540 */ 558 */
541#define NULL_GUID \ 559#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
542 EFI_GUID(0x00000000, 0x0000, 0x0000, \ 560#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
543 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) 561#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
544 562#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
545#define MPS_TABLE_GUID \ 563#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
546 EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \ 564#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
547 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) 565#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
548 566#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
549#define ACPI_TABLE_GUID \ 567#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
550 EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \ 568#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
551 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) 569#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
552 570#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
553#define ACPI_20_TABLE_GUID \ 571#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
554 EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \ 572#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
555 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) 573#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
556 574#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
557#define SMBIOS_TABLE_GUID \ 575#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
558 EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \ 576#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
559 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) 577#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
560 578#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
561#define SMBIOS3_TABLE_GUID \ 579#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
562 EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \ 580#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
563 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) 581#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
564 582#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
565#define SAL_SYSTEM_TABLE_GUID \
566 EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \
567 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
568
569#define HCDP_TABLE_GUID \
570 EFI_GUID(0xf951938d, 0x620b, 0x42ef, \
571 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
572
573#define UGA_IO_PROTOCOL_GUID \
574 EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \
575 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
576
577#define EFI_GLOBAL_VARIABLE_GUID \
578 EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
579 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
580
581#define UV_SYSTEM_TABLE_GUID \
582 EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \
583 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
584
585#define LINUX_EFI_CRASH_GUID \
586 EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \
587 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
588
589#define LOADED_IMAGE_PROTOCOL_GUID \
590 EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \
591 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
592
593#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \
594 EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \
595 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
596
597#define EFI_UGA_PROTOCOL_GUID \
598 EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \
599 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
600
601#define EFI_PCI_IO_PROTOCOL_GUID \
602 EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \
603 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
604
605#define EFI_FILE_INFO_ID \
606 EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \
607 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
608
609#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
610 EFI_GUID(0xb122a263, 0x3661, 0x4f68, \
611 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
612
613#define EFI_FILE_SYSTEM_GUID \
614 EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \
615 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
616
617#define DEVICE_TREE_GUID \
618 EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \
619 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
620
621#define EFI_PROPERTIES_TABLE_GUID \
622 EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \
623 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
624
625#define EFI_RNG_PROTOCOL_GUID \
626 EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
627 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
628
629#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
630 EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
631 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
632
633#define EFI_CONSOLE_OUT_DEVICE_GUID \
634 EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
635 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
636 583
637/* 584/*
638 * This GUID is used to pass to the kernel proper the struct screen_info 585 * This GUID is used to pass to the kernel proper the struct screen_info
639 * structure that was populated by the stub based on the GOP protocol instance 586 * structure that was populated by the stub based on the GOP protocol instance
640 * associated with ConOut 587 * associated with ConOut
641 */ 588 */
642#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \ 589#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
643 EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \ 590#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
644 0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
645
646#define LINUX_EFI_LOADER_ENTRY_GUID \
647 EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \
648 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
649 591
650typedef struct { 592typedef struct {
651 efi_guid_t guid; 593 efi_guid_t guid;
@@ -975,7 +917,6 @@ extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
975extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); 917extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
976extern void efi_initialize_iomem_resources(struct resource *code_resource, 918extern void efi_initialize_iomem_resources(struct resource *code_resource,
977 struct resource *data_resource, struct resource *bss_resource); 919 struct resource *data_resource, struct resource *bss_resource);
978extern void efi_get_time(struct timespec *now);
979extern void efi_reserve_boot_services(void); 920extern void efi_reserve_boot_services(void);
980extern int efi_get_fdt_params(struct efi_fdt_params *params); 921extern int efi_get_fdt_params(struct efi_fdt_params *params);
981extern struct kobject *efi_kobj; 922extern struct kobject *efi_kobj;
@@ -1005,7 +946,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
1005/* Iterate through an efi_memory_map */ 946/* Iterate through an efi_memory_map */
1006#define for_each_efi_memory_desc_in_map(m, md) \ 947#define for_each_efi_memory_desc_in_map(m, md) \
1007 for ((md) = (m)->map; \ 948 for ((md) = (m)->map; \
1008 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ 949 ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
1009 (md) = (void *)(md) + (m)->desc_size) 950 (md) = (void *)(md) + (m)->desc_size)
1010 951
1011/** 952/**
@@ -1465,4 +1406,55 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
1465 unsigned long size); 1406 unsigned long size);
1466 1407
1467bool efi_runtime_disabled(void); 1408bool efi_runtime_disabled(void);
1409extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
1410
1411/*
1412 * Arch code can implement the following three template macros, avoiding
1413 * reptition for the void/non-void return cases of {__,}efi_call_virt():
1414 *
1415 * * arch_efi_call_virt_setup()
1416 *
1417 * Sets up the environment for the call (e.g. switching page tables,
1418 * allowing kernel-mode use of floating point, if required).
1419 *
1420 * * arch_efi_call_virt()
1421 *
1422 * Performs the call. The last expression in the macro must be the call
1423 * itself, allowing the logic to be shared by the void and non-void
1424 * cases.
1425 *
1426 * * arch_efi_call_virt_teardown()
1427 *
1428 * Restores the usual kernel environment once the call has returned.
1429 */
1430
1431#define efi_call_virt_pointer(p, f, args...) \
1432({ \
1433 efi_status_t __s; \
1434 unsigned long __flags; \
1435 \
1436 arch_efi_call_virt_setup(); \
1437 \
1438 local_save_flags(__flags); \
1439 __s = arch_efi_call_virt(p, f, args); \
1440 efi_call_virt_check_flags(__flags, __stringify(f)); \
1441 \
1442 arch_efi_call_virt_teardown(); \
1443 \
1444 __s; \
1445})
1446
1447#define __efi_call_virt_pointer(p, f, args...) \
1448({ \
1449 unsigned long __flags; \
1450 \
1451 arch_efi_call_virt_setup(); \
1452 \
1453 local_save_flags(__flags); \
1454 arch_efi_call_virt(p, f, args); \
1455 efi_call_virt_check_flags(__flags, __stringify(f)); \
1456 \
1457 arch_efi_call_virt_teardown(); \
1458})
1459
1468#endif /* _LINUX_EFI_H */ 1460#endif /* _LINUX_EFI_H */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7abf674c388c..61004413dc64 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -126,42 +126,6 @@ struct extcon_dev {
126 struct device_attribute *d_attrs_muex; 126 struct device_attribute *d_attrs_muex;
127}; 127};
128 128
129/**
130 * struct extcon_cable - An internal data for each cable of extcon device.
131 * @edev: The extcon device
132 * @cable_index: Index of this cable in the edev
133 * @attr_g: Attribute group for the cable
134 * @attr_name: "name" sysfs entry
135 * @attr_state: "state" sysfs entry
136 * @attrs: Array pointing to attr_name and attr_state for attr_g
137 */
138struct extcon_cable {
139 struct extcon_dev *edev;
140 int cable_index;
141
142 struct attribute_group attr_g;
143 struct device_attribute attr_name;
144 struct device_attribute attr_state;
145
146 struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
147};
148
149/**
150 * struct extcon_specific_cable_nb - An internal data for
151 * extcon_register_interest().
152 * @user_nb: user provided notifier block for events from
153 * a specific cable.
154 * @cable_index: the target cable.
155 * @edev: the target extcon device.
156 * @previous_value: the saved previous event value.
157 */
158struct extcon_specific_cable_nb {
159 struct notifier_block *user_nb;
160 int cable_index;
161 struct extcon_dev *edev;
162 unsigned long previous_value;
163};
164
165#if IS_ENABLED(CONFIG_EXTCON) 129#if IS_ENABLED(CONFIG_EXTCON)
166 130
167/* 131/*
@@ -201,29 +165,12 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
201 165
202/* 166/*
203 * get/set_cable_state access each bit of the 32b encoded state value. 167 * get/set_cable_state access each bit of the 32b encoded state value.
204 * They are used to access the status of each cable based on the cable_name. 168 * They are used to access the status of each cable based on the cable id.
205 */ 169 */
206extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id); 170extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
207extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, 171extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
208 bool cable_state); 172 bool cable_state);
209 173
210extern int extcon_get_cable_state(struct extcon_dev *edev,
211 const char *cable_name);
212extern int extcon_set_cable_state(struct extcon_dev *edev,
213 const char *cable_name, bool cable_state);
214
215/*
216 * Following APIs are for notifiees (those who want to be notified)
217 * to register a callback for events from a specific cable of the extcon.
218 * Notifiees are the connected device drivers wanting to get notified by
219 * a specific external port of a connection device.
220 */
221extern int extcon_register_interest(struct extcon_specific_cable_nb *obj,
222 const char *extcon_name,
223 const char *cable_name,
224 struct notifier_block *nb);
225extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
226
227/* 174/*
228 * Following APIs are to monitor every action of a notifier. 175 * Following APIs are to monitor every action of a notifier.
229 * Registrar gets notified for every external port of a connection device. 176 * Registrar gets notified for every external port of a connection device.
@@ -235,6 +182,12 @@ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
235 struct notifier_block *nb); 182 struct notifier_block *nb);
236extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, 183extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
237 struct notifier_block *nb); 184 struct notifier_block *nb);
185extern int devm_extcon_register_notifier(struct device *dev,
186 struct extcon_dev *edev, unsigned int id,
187 struct notifier_block *nb);
188extern void devm_extcon_unregister_notifier(struct device *dev,
189 struct extcon_dev *edev, unsigned int id,
190 struct notifier_block *nb);
238 191
239/* 192/*
240 * Following API get the extcon device from devicetree. 193 * Following API get the extcon device from devicetree.
@@ -246,6 +199,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
246/* Following API to get information of extcon device */ 199/* Following API to get information of extcon device */
247extern const char *extcon_get_edev_name(struct extcon_dev *edev); 200extern const char *extcon_get_edev_name(struct extcon_dev *edev);
248 201
202
249#else /* CONFIG_EXTCON */ 203#else /* CONFIG_EXTCON */
250static inline int extcon_dev_register(struct extcon_dev *edev) 204static inline int extcon_dev_register(struct extcon_dev *edev)
251{ 205{
@@ -306,18 +260,6 @@ static inline int extcon_set_cable_state_(struct extcon_dev *edev,
306 return 0; 260 return 0;
307} 261}
308 262
309static inline int extcon_get_cable_state(struct extcon_dev *edev,
310 const char *cable_name)
311{
312 return 0;
313}
314
315static inline int extcon_set_cable_state(struct extcon_dev *edev,
316 const char *cable_name, int state)
317{
318 return 0;
319}
320
321static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) 263static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
322{ 264{
323 return NULL; 265 return NULL;
@@ -337,19 +279,16 @@ static inline int extcon_unregister_notifier(struct extcon_dev *edev,
337 return 0; 279 return 0;
338} 280}
339 281
340static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, 282static inline int devm_extcon_register_notifier(struct device *dev,
341 const char *extcon_name, 283 struct extcon_dev *edev, unsigned int id,
342 const char *cable_name, 284 struct notifier_block *nb)
343 struct notifier_block *nb)
344{ 285{
345 return 0; 286 return -ENOSYS;
346} 287}
347 288
348static inline int extcon_unregister_interest(struct extcon_specific_cable_nb 289static inline void devm_extcon_unregister_notifier(struct device *dev,
349 *obj) 290 struct extcon_dev *edev, unsigned int id,
350{ 291 struct notifier_block *nb) { }
351 return 0;
352}
353 292
354static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, 293static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
355 int index) 294 int index)
@@ -357,4 +296,28 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
357 return ERR_PTR(-ENODEV); 296 return ERR_PTR(-ENODEV);
358} 297}
359#endif /* CONFIG_EXTCON */ 298#endif /* CONFIG_EXTCON */
299
300/*
301 * Following structure and API are deprecated. EXTCON remains the function
302 * definition to prevent the build break.
303 */
304struct extcon_specific_cable_nb {
305 struct notifier_block *user_nb;
306 int cable_index;
307 struct extcon_dev *edev;
308 unsigned long previous_value;
309};
310
311static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
312 const char *extcon_name, const char *cable_name,
313 struct notifier_block *nb)
314{
315 return -EINVAL;
316}
317
318static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
319 *obj)
320{
321 return -EINVAL;
322}
360#endif /* __LINUX_EXTCON_H__ */ 323#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 53c60806bcfb..ac85f2061351 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -53,6 +53,7 @@ struct adc_jack_cond {
53 * milli-seconds after the interrupt occurs. You may 53 * milli-seconds after the interrupt occurs. You may
54 * describe such delays with @handling_delay_ms, which 54 * describe such delays with @handling_delay_ms, which
55 * is rounded-off by jiffies. 55 * is rounded-off by jiffies.
56 * @wakeup_source: flag to wake up the system for extcon events.
56 */ 57 */
57struct adc_jack_pdata { 58struct adc_jack_pdata {
58 const char *name; 59 const char *name;
@@ -65,6 +66,7 @@ struct adc_jack_pdata {
65 66
66 unsigned long irq_flags; 67 unsigned long irq_flags;
67 unsigned long handling_delay_ms; /* in ms */ 68 unsigned long handling_delay_ms; /* in ms */
69 bool wakeup_source;
68}; 70};
69 71
70#endif /* _EXTCON_ADC_JACK_H */ 72#endif /* _EXTCON_ADC_JACK_H */
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2b17698b60b8..1de1b3f6fb76 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,6 +49,8 @@ struct fence_cb;
49 * @timestamp: Timestamp when the fence was signaled. 49 * @timestamp: Timestamp when the fence was signaled.
50 * @status: Optional, only valid if < 0, must be set before calling 50 * @status: Optional, only valid if < 0, must be set before calling
51 * fence_signal, indicates that the fence has completed with an error. 51 * fence_signal, indicates that the fence has completed with an error.
52 * @child_list: list of children fences
53 * @active_list: list of active fences
52 * 54 *
53 * the flags member must be manipulated and read using the appropriate 55 * the flags member must be manipulated and read using the appropriate
54 * atomic ops (bit_*), so taking the spinlock will not be needed most 56 * atomic ops (bit_*), so taking the spinlock will not be needed most
@@ -79,8 +81,6 @@ struct fence {
79 unsigned long flags; 81 unsigned long flags;
80 ktime_t timestamp; 82 ktime_t timestamp;
81 int status; 83 int status;
82 struct list_head child_list;
83 struct list_head active_list;
84}; 84};
85 85
86enum fence_flag_bits { 86enum fence_flag_bits {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6fc31ef1da2d..8f74f3d61894 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -467,7 +467,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
467} 467}
468#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ 468#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
469 469
470int sk_filter(struct sock *sk, struct sk_buff *skb); 470int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
471static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
472{
473 return sk_filter_trim_cap(sk, skb, 1);
474}
471 475
472struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); 476struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
473void bpf_prog_free(struct bpf_prog *fp); 477void bpf_prog_free(struct bpf_prog *fp);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 604e1526cd00..13ba552e6c09 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -241,7 +241,7 @@ struct fscache_cache_ops {
241 241
242 /* check the consistency between the backing cache and the FS-Cache 242 /* check the consistency between the backing cache and the FS-Cache
243 * cookie */ 243 * cookie */
244 bool (*check_consistency)(struct fscache_operation *op); 244 int (*check_consistency)(struct fscache_operation *op);
245 245
246 /* store the updated auxiliary data on an object */ 246 /* store the updated auxiliary data on an object */
247 void (*update_object)(struct fscache_object *object); 247 void (*update_object)(struct fscache_object *object);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 419fb9e03447..f0a7a0320300 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -94,7 +94,7 @@ static inline int split_huge_page(struct page *page)
94void deferred_split_huge_page(struct page *page); 94void deferred_split_huge_page(struct page *page);
95 95
96void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 96void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
97 unsigned long address, bool freeze); 97 unsigned long address, bool freeze, struct page *page);
98 98
99#define split_huge_pmd(__vma, __pmd, __address) \ 99#define split_huge_pmd(__vma, __pmd, __address) \
100 do { \ 100 do { \
@@ -102,7 +102,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
102 if (pmd_trans_huge(*____pmd) \ 102 if (pmd_trans_huge(*____pmd) \
103 || pmd_devmap(*____pmd)) \ 103 || pmd_devmap(*____pmd)) \
104 __split_huge_pmd(__vma, __pmd, __address, \ 104 __split_huge_pmd(__vma, __pmd, __address, \
105 false); \ 105 false, NULL); \
106 } while (0) 106 } while (0)
107 107
108 108
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index d029ffac0d69..228bd44efa4c 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -223,6 +223,9 @@ struct st_sensor_settings {
223 * @get_irq_data_ready: Function to get the IRQ used for data ready signal. 223 * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
224 * @tf: Transfer function structure used by I/O operations. 224 * @tf: Transfer function structure used by I/O operations.
225 * @tb: Transfer buffers and mutex used by I/O operations. 225 * @tb: Transfer buffers and mutex used by I/O operations.
226 * @edge_irq: the IRQ triggers on edges and need special handling.
227 * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
228 * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
226 */ 229 */
227struct st_sensor_data { 230struct st_sensor_data {
228 struct device *dev; 231 struct device *dev;
@@ -247,12 +250,14 @@ struct st_sensor_data {
247 250
248 const struct st_sensor_transfer_function *tf; 251 const struct st_sensor_transfer_function *tf;
249 struct st_sensor_transfer_buffer tb; 252 struct st_sensor_transfer_buffer tb;
253
254 bool edge_irq;
255 bool hw_irq_trigger;
256 s64 hw_timestamp;
250}; 257};
251 258
252#ifdef CONFIG_IIO_BUFFER 259#ifdef CONFIG_IIO_BUFFER
253irqreturn_t st_sensors_trigger_handler(int irq, void *p); 260irqreturn_t st_sensors_trigger_handler(int irq, void *p);
254
255int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf);
256#endif 261#endif
257 262
258#ifdef CONFIG_IIO_TRIGGER 263#ifdef CONFIG_IIO_TRIGGER
@@ -260,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
260 const struct iio_trigger_ops *trigger_ops); 265 const struct iio_trigger_ops *trigger_ops);
261 266
262void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); 267void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
263 268int st_sensors_validate_device(struct iio_trigger *trig,
269 struct iio_dev *indio_dev);
264#else 270#else
265static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, 271static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
266 const struct iio_trigger_ops *trigger_ops) 272 const struct iio_trigger_ops *trigger_ops)
@@ -271,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
271{ 277{
272 return; 278 return;
273} 279}
280#define st_sensors_validate_device NULL
274#endif 281#endif
275 282
276int st_sensors_init_sensor(struct iio_dev *indio_dev, 283int st_sensors_init_sensor(struct iio_dev *indio_dev,
@@ -280,7 +287,7 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable);
280 287
281int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); 288int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
282 289
283void st_sensors_power_enable(struct iio_dev *indio_dev); 290int st_sensors_power_enable(struct iio_dev *indio_dev);
284 291
285void st_sensors_power_disable(struct iio_dev *indio_dev); 292void st_sensors_power_disable(struct iio_dev *indio_dev);
286 293
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 7c29cb0124ae..854e2dad1e0d 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -312,13 +312,8 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
312 }, \ 312 }, \
313} 313}
314 314
315/** 315s64 iio_get_time_ns(const struct iio_dev *indio_dev);
316 * iio_get_time_ns() - utility function to get a time stamp for events etc 316unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
317 **/
318static inline s64 iio_get_time_ns(void)
319{
320 return ktime_get_real_ns();
321}
322 317
323/* Device operating modes */ 318/* Device operating modes */
324#define INDIO_DIRECT_MODE 0x01 319#define INDIO_DIRECT_MODE 0x01
@@ -497,6 +492,7 @@ struct iio_buffer_setup_ops {
497 * @chan_attr_group: [INTERN] group for all attrs in base directory 492 * @chan_attr_group: [INTERN] group for all attrs in base directory
498 * @name: [DRIVER] name of the device. 493 * @name: [DRIVER] name of the device.
499 * @info: [DRIVER] callbacks and constant info from driver 494 * @info: [DRIVER] callbacks and constant info from driver
495 * @clock_id: [INTERN] timestamping clock posix identifier
500 * @info_exist_lock: [INTERN] lock to prevent use during removal 496 * @info_exist_lock: [INTERN] lock to prevent use during removal
501 * @setup_ops: [DRIVER] callbacks to call before and after buffer 497 * @setup_ops: [DRIVER] callbacks to call before and after buffer
502 * enable/disable 498 * enable/disable
@@ -537,6 +533,7 @@ struct iio_dev {
537 struct attribute_group chan_attr_group; 533 struct attribute_group chan_attr_group;
538 const char *name; 534 const char *name;
539 const struct iio_info *info; 535 const struct iio_info *info;
536 clockid_t clock_id;
540 struct mutex info_exist_lock; 537 struct mutex info_exist_lock;
541 const struct iio_buffer_setup_ops *setup_ops; 538 const struct iio_buffer_setup_ops *setup_ops;
542 struct cdev chrdev; 539 struct cdev chrdev;
@@ -565,7 +562,7 @@ extern struct bus_type iio_bus_type;
565 562
566/** 563/**
567 * iio_device_put() - reference counted deallocation of struct device 564 * iio_device_put() - reference counted deallocation of struct device
568 * @indio_dev: IIO device structure containing the device 565 * @indio_dev: IIO device structure containing the device
569 **/ 566 **/
570static inline void iio_device_put(struct iio_dev *indio_dev) 567static inline void iio_device_put(struct iio_dev *indio_dev)
571{ 568{
@@ -574,6 +571,15 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
574} 571}
575 572
576/** 573/**
574 * iio_device_get_clock() - Retrieve current timestamping clock for the device
575 * @indio_dev: IIO device structure containing the device
576 */
577static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
578{
579 return indio_dev->clock_id;
580}
581
582/**
577 * dev_to_iio_dev() - Get IIO device struct from a device struct 583 * dev_to_iio_dev() - Get IIO device struct from a device struct
578 * @dev: The device embedded in the IIO device 584 * @dev: The device embedded in the IIO device
579 * 585 *
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
new file mode 100644
index 000000000000..23ca41515527
--- /dev/null
+++ b/include/linux/iio/sw_device.h
@@ -0,0 +1,70 @@
1/*
2 * Industrial I/O software device interface
3 *
4 * Copyright (c) 2016 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 */
10
11#ifndef __IIO_SW_DEVICE
12#define __IIO_SW_DEVICE
13
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/iio/iio.h>
17#include <linux/configfs.h>
18
19#define module_iio_sw_device_driver(__iio_sw_device_type) \
20 module_driver(__iio_sw_device_type, iio_register_sw_device_type, \
21 iio_unregister_sw_device_type)
22
23struct iio_sw_device_ops;
24
25struct iio_sw_device_type {
26 const char *name;
27 struct module *owner;
28 const struct iio_sw_device_ops *ops;
29 struct list_head list;
30 struct config_group *group;
31};
32
33struct iio_sw_device {
34 struct iio_dev *device;
35 struct iio_sw_device_type *device_type;
36 struct config_group group;
37};
38
39struct iio_sw_device_ops {
40 struct iio_sw_device* (*probe)(const char *);
41 int (*remove)(struct iio_sw_device *);
42};
43
44static inline
45struct iio_sw_device *to_iio_sw_device(struct config_item *item)
46{
47 return container_of(to_config_group(item), struct iio_sw_device,
48 group);
49}
50
51int iio_register_sw_device_type(struct iio_sw_device_type *dt);
52void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
53
54struct iio_sw_device *iio_sw_device_create(const char *, const char *);
55void iio_sw_device_destroy(struct iio_sw_device *);
56
57int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
58void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
59
60static inline
61void iio_swd_group_init_type_name(struct iio_sw_device *d,
62 const char *name,
63 struct config_item_type *type)
64{
65#ifdef CONFIG_CONFIGFS_FS
66 config_group_init_type_name(&d->group, name, type);
67#endif
68}
69
70#endif /* __IIO_SW_DEVICE */
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 7c27fa1030e8..feb04ea20f11 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -52,6 +52,12 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
52 52
53int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); 53int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
54 54
55void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
56
57int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
58 struct inet_diag_msg *r, int ext,
59 struct user_namespace *user_ns);
60
55extern int inet_diag_register(const struct inet_diag_handler *handler); 61extern int inet_diag_register(const struct inet_diag_handler *handler);
56extern void inet_diag_unregister(const struct inet_diag_handler *handler); 62extern void inet_diag_unregister(const struct inet_diag_handler *handler);
57#endif /* _INET_DIAG_H_ */ 63#endif /* _INET_DIAG_H_ */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f2cb8d45513d..f8834f820ec2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -190,7 +190,7 @@ extern struct task_group root_task_group;
190#define INIT_TASK(tsk) \ 190#define INIT_TASK(tsk) \
191{ \ 191{ \
192 .state = 0, \ 192 .state = 0, \
193 .stack = &init_thread_info, \ 193 .stack = init_stack, \
194 .usage = ATOMIC_INIT(2), \ 194 .usage = ATOMIC_INIT(2), \
195 .flags = PF_KTHREAD, \ 195 .flags = PF_KTHREAD, \
196 .prio = MAX_PRIO-20, \ 196 .prio = MAX_PRIO-20, \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9fcabeb07787..b6683f0ffc9f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -278,6 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
278extern int 278extern int
279irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 279irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
280 280
281struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs);
282
281#else /* CONFIG_SMP */ 283#else /* CONFIG_SMP */
282 284
283static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) 285static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -308,6 +310,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
308{ 310{
309 return 0; 311 return 0;
310} 312}
313
314static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
315{
316 *nr_vecs = 1;
317 return NULL;
318}
311#endif /* CONFIG_SMP */ 319#endif /* CONFIG_SMP */
312 320
313/* 321/*
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4d758a7c604a..b52424eaa0ed 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -197,6 +197,7 @@ struct irq_data {
197 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt 197 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
198 * IRQD_WAKEUP_ARMED - Wakeup mode armed 198 * IRQD_WAKEUP_ARMED - Wakeup mode armed
199 * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU 199 * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
200 * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel
200 */ 201 */
201enum { 202enum {
202 IRQD_TRIGGER_MASK = 0xf, 203 IRQD_TRIGGER_MASK = 0xf,
@@ -212,6 +213,7 @@ enum {
212 IRQD_IRQ_INPROGRESS = (1 << 18), 213 IRQD_IRQ_INPROGRESS = (1 << 18),
213 IRQD_WAKEUP_ARMED = (1 << 19), 214 IRQD_WAKEUP_ARMED = (1 << 19),
214 IRQD_FORWARDED_TO_VCPU = (1 << 20), 215 IRQD_FORWARDED_TO_VCPU = (1 << 20),
216 IRQD_AFFINITY_MANAGED = (1 << 21),
215}; 217};
216 218
217#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) 219#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
305 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; 307 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
306} 308}
307 309
310static inline bool irqd_affinity_is_managed(struct irq_data *d)
311{
312 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
313}
314
308#undef __irqd_to_state 315#undef __irqd_to_state
309 316
310static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 317static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -315,6 +322,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
315/** 322/**
316 * struct irq_chip - hardware interrupt chip descriptor 323 * struct irq_chip - hardware interrupt chip descriptor
317 * 324 *
325 * @parent_device: pointer to parent device for irqchip
318 * @name: name for /proc/interrupts 326 * @name: name for /proc/interrupts
319 * @irq_startup: start up the interrupt (defaults to ->enable if NULL) 327 * @irq_startup: start up the interrupt (defaults to ->enable if NULL)
320 * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) 328 * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -354,6 +362,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
354 * @flags: chip specific flags 362 * @flags: chip specific flags
355 */ 363 */
356struct irq_chip { 364struct irq_chip {
365 struct device *parent_device;
357 const char *name; 366 const char *name;
358 unsigned int (*irq_startup)(struct irq_data *data); 367 unsigned int (*irq_startup)(struct irq_data *data);
359 void (*irq_shutdown)(struct irq_data *data); 368 void (*irq_shutdown)(struct irq_data *data);
@@ -482,12 +491,15 @@ extern void handle_fasteoi_irq(struct irq_desc *desc);
482extern void handle_edge_irq(struct irq_desc *desc); 491extern void handle_edge_irq(struct irq_desc *desc);
483extern void handle_edge_eoi_irq(struct irq_desc *desc); 492extern void handle_edge_eoi_irq(struct irq_desc *desc);
484extern void handle_simple_irq(struct irq_desc *desc); 493extern void handle_simple_irq(struct irq_desc *desc);
494extern void handle_untracked_irq(struct irq_desc *desc);
485extern void handle_percpu_irq(struct irq_desc *desc); 495extern void handle_percpu_irq(struct irq_desc *desc);
486extern void handle_percpu_devid_irq(struct irq_desc *desc); 496extern void handle_percpu_devid_irq(struct irq_desc *desc);
487extern void handle_bad_irq(struct irq_desc *desc); 497extern void handle_bad_irq(struct irq_desc *desc);
488extern void handle_nested_irq(unsigned int irq); 498extern void handle_nested_irq(unsigned int irq);
489 499
490extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); 500extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
501extern int irq_chip_pm_get(struct irq_data *data);
502extern int irq_chip_pm_put(struct irq_data *data);
491#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 503#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
492extern void irq_chip_enable_parent(struct irq_data *data); 504extern void irq_chip_enable_parent(struct irq_data *data);
493extern void irq_chip_disable_parent(struct irq_data *data); 505extern void irq_chip_disable_parent(struct irq_data *data);
@@ -701,11 +713,11 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
701unsigned int arch_dynirq_lower_bound(unsigned int from); 713unsigned int arch_dynirq_lower_bound(unsigned int from);
702 714
703int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 715int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
704 struct module *owner); 716 struct module *owner, const struct cpumask *affinity);
705 717
706/* use macros to avoid needing export.h for THIS_MODULE */ 718/* use macros to avoid needing export.h for THIS_MODULE */
707#define irq_alloc_descs(irq, from, cnt, node) \ 719#define irq_alloc_descs(irq, from, cnt, node) \
708 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) 720 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
709 721
710#define irq_alloc_desc(node) \ 722#define irq_alloc_desc(node) \
711 irq_alloc_descs(-1, 0, 1, node) 723 irq_alloc_descs(-1, 0, 1, node)
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index bfbd707de390..107eed475b94 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -204,6 +204,7 @@
204#define GITS_BASER_NR_REGS 8 204#define GITS_BASER_NR_REGS 8
205 205
206#define GITS_BASER_VALID (1UL << 63) 206#define GITS_BASER_VALID (1UL << 63)
207#define GITS_BASER_INDIRECT (1UL << 62)
207#define GITS_BASER_nCnB (0UL << 59) 208#define GITS_BASER_nCnB (0UL << 59)
208#define GITS_BASER_nC (1UL << 59) 209#define GITS_BASER_nC (1UL << 59)
209#define GITS_BASER_RaWt (2UL << 59) 210#define GITS_BASER_RaWt (2UL << 59)
@@ -228,6 +229,7 @@
228#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) 229#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
229#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) 230#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
230#define GITS_BASER_PAGES_MAX 256 231#define GITS_BASER_PAGES_MAX 256
232#define GITS_BASER_PAGES_SHIFT (0)
231 233
232#define GITS_BASER_TYPE_NONE 0 234#define GITS_BASER_TYPE_NONE 0
233#define GITS_BASER_TYPE_DEVICE 1 235#define GITS_BASER_TYPE_DEVICE 1
@@ -238,6 +240,8 @@
238#define GITS_BASER_TYPE_RESERVED6 6 240#define GITS_BASER_TYPE_RESERVED6 6
239#define GITS_BASER_TYPE_RESERVED7 7 241#define GITS_BASER_TYPE_RESERVED7 7
240 242
243#define GITS_LVL1_ENTRY_SIZE (8UL)
244
241/* 245/*
242 * ITS commands 246 * ITS commands
243 */ 247 */
@@ -305,12 +309,12 @@
305#define ICC_SGI1R_AFFINITY_1_SHIFT 16 309#define ICC_SGI1R_AFFINITY_1_SHIFT 16
306#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) 310#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
307#define ICC_SGI1R_SGI_ID_SHIFT 24 311#define ICC_SGI1R_SGI_ID_SHIFT 24
308#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) 312#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
309#define ICC_SGI1R_AFFINITY_2_SHIFT 32 313#define ICC_SGI1R_AFFINITY_2_SHIFT 32
310#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) 314#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
311#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 315#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
312#define ICC_SGI1R_AFFINITY_3_SHIFT 48 316#define ICC_SGI1R_AFFINITY_3_SHIFT 48
313#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) 317#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
314 318
315#include <asm/arch_gicv3.h> 319#include <asm/arch_gicv3.h>
316 320
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index fd051855539b..eafc965b3eb8 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -101,9 +101,14 @@
101#include <linux/irqdomain.h> 101#include <linux/irqdomain.h>
102 102
103struct device_node; 103struct device_node;
104struct gic_chip_data;
104 105
105void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 106void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
106int gic_cpu_if_down(unsigned int gic_nr); 107int gic_cpu_if_down(unsigned int gic_nr);
108void gic_cpu_save(struct gic_chip_data *gic);
109void gic_cpu_restore(struct gic_chip_data *gic);
110void gic_dist_save(struct gic_chip_data *gic);
111void gic_dist_restore(struct gic_chip_data *gic);
107 112
108/* 113/*
109 * Subdrivers that need some preparatory work can initialize their 114 * Subdrivers that need some preparatory work can initialize their
@@ -112,6 +117,12 @@ int gic_cpu_if_down(unsigned int gic_nr);
112int gic_of_init(struct device_node *node, struct device_node *parent); 117int gic_of_init(struct device_node *node, struct device_node *parent);
113 118
114/* 119/*
120 * Initialises and registers a non-root or child GIC chip. Memory for
121 * the gic_chip_data structure is dynamically allocated.
122 */
123int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
124
125/*
115 * Legacy platforms not converted to DT yet must use this to init 126 * Legacy platforms not converted to DT yet must use this to init
116 * their GIC 127 * their GIC
117 */ 128 */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index f1f36e04d885..ffb84604c1de 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -39,6 +39,7 @@ struct irq_domain;
39struct of_device_id; 39struct of_device_id;
40struct irq_chip; 40struct irq_chip;
41struct irq_data; 41struct irq_data;
42struct cpumask;
42 43
43/* Number of irqs reserved for a legacy isa controller */ 44/* Number of irqs reserved for a legacy isa controller */
44#define NUM_ISA_INTERRUPTS 16 45#define NUM_ISA_INTERRUPTS 16
@@ -217,7 +218,8 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
217 enum irq_domain_bus_token bus_token); 218 enum irq_domain_bus_token bus_token);
218extern void irq_set_default_host(struct irq_domain *host); 219extern void irq_set_default_host(struct irq_domain *host);
219extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, 220extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
220 irq_hw_number_t hwirq, int node); 221 irq_hw_number_t hwirq, int node,
222 const struct cpumask *affinity);
221 223
222static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) 224static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
223{ 225{
@@ -389,7 +391,7 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
389 391
390extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, 392extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
391 unsigned int nr_irqs, int node, void *arg, 393 unsigned int nr_irqs, int node, void *arg,
392 bool realloc); 394 bool realloc, const struct cpumask *affinity);
393extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); 395extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
394extern void irq_domain_activate_irq(struct irq_data *irq_data); 396extern void irq_domain_activate_irq(struct irq_data *irq_data);
395extern void irq_domain_deactivate_irq(struct irq_data *irq_data); 397extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
@@ -397,7 +399,8 @@ extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
397static inline int irq_domain_alloc_irqs(struct irq_domain *domain, 399static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
398 unsigned int nr_irqs, int node, void *arg) 400 unsigned int nr_irqs, int node, void *arg)
399{ 401{
400 return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); 402 return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
403 NULL);
401} 404}
402 405
403extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, 406extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
@@ -452,6 +455,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
452 return -1; 455 return -1;
453} 456}
454 457
458static inline void irq_domain_free_irqs(unsigned int virq,
459 unsigned int nr_irqs) { }
460
455static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) 461static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
456{ 462{
457 return false; 463 return false;
diff --git a/include/linux/isa.h b/include/linux/isa.h
index 5ab85281230b..f2d0258414cf 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -6,6 +6,7 @@
6#define __LINUX_ISA_H 6#define __LINUX_ISA_H
7 7
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/errno.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10 11
11struct isa_driver { 12struct isa_driver {
@@ -22,13 +23,13 @@ struct isa_driver {
22 23
23#define to_isa_driver(x) container_of((x), struct isa_driver, driver) 24#define to_isa_driver(x) container_of((x), struct isa_driver, driver)
24 25
25#ifdef CONFIG_ISA 26#ifdef CONFIG_ISA_BUS_API
26int isa_register_driver(struct isa_driver *, unsigned int); 27int isa_register_driver(struct isa_driver *, unsigned int);
27void isa_unregister_driver(struct isa_driver *); 28void isa_unregister_driver(struct isa_driver *);
28#else 29#else
29static inline int isa_register_driver(struct isa_driver *d, unsigned int i) 30static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
30{ 31{
31 return 0; 32 return -ENODEV;
32} 33}
33 34
34static inline void isa_unregister_driver(struct isa_driver *d) 35static inline void isa_unregister_driver(struct isa_driver *d)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0536524bb9eb..68904469fba1 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -117,13 +117,18 @@ struct module;
117 117
118#include <linux/atomic.h> 118#include <linux/atomic.h>
119 119
120#ifdef HAVE_JUMP_LABEL
121
120static inline int static_key_count(struct static_key *key) 122static inline int static_key_count(struct static_key *key)
121{ 123{
122 return atomic_read(&key->enabled); 124 /*
125 * -1 means the first static_key_slow_inc() is in progress.
126 * static_key_enabled() must return true, so return 1 here.
127 */
128 int n = atomic_read(&key->enabled);
129 return n >= 0 ? n : 1;
123} 130}
124 131
125#ifdef HAVE_JUMP_LABEL
126
127#define JUMP_TYPE_FALSE 0UL 132#define JUMP_TYPE_FALSE 0UL
128#define JUMP_TYPE_TRUE 1UL 133#define JUMP_TYPE_TRUE 1UL
129#define JUMP_TYPE_MASK 1UL 134#define JUMP_TYPE_MASK 1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
162 167
163#else /* !HAVE_JUMP_LABEL */ 168#else /* !HAVE_JUMP_LABEL */
164 169
170static inline int static_key_count(struct static_key *key)
171{
172 return atomic_read(&key->enabled);
173}
174
165static __always_inline void jump_label_init(void) 175static __always_inline void jump_label_init(void)
166{ 176{
167 static_key_initialized = true; 177 static_key_initialized = true;
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 611927f5870d..ac4b3c46a84d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
59 59
60void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); 60void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
61void kasan_kfree_large(const void *ptr); 61void kasan_kfree_large(const void *ptr);
62void kasan_kfree(void *ptr); 62void kasan_poison_kfree(void *ptr);
63void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, 63void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
64 gfp_t flags); 64 gfp_t flags);
65void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); 65void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
66 66
67void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); 67void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
68bool kasan_slab_free(struct kmem_cache *s, void *object); 68bool kasan_slab_free(struct kmem_cache *s, void *object);
69void kasan_poison_slab_free(struct kmem_cache *s, void *object);
70 69
71struct kasan_cache { 70struct kasan_cache {
72 int alloc_meta_offset; 71 int alloc_meta_offset;
@@ -76,6 +75,9 @@ struct kasan_cache {
76int kasan_module_alloc(void *addr, size_t size); 75int kasan_module_alloc(void *addr, size_t size);
77void kasan_free_shadow(const struct vm_struct *vm); 76void kasan_free_shadow(const struct vm_struct *vm);
78 77
78size_t ksize(const void *);
79static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
80
79#else /* CONFIG_KASAN */ 81#else /* CONFIG_KASAN */
80 82
81static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 83static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
102 104
103static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} 105static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
104static inline void kasan_kfree_large(const void *ptr) {} 106static inline void kasan_kfree_large(const void *ptr) {}
105static inline void kasan_kfree(void *ptr) {} 107static inline void kasan_poison_kfree(void *ptr) {}
106static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, 108static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
107 size_t size, gfp_t flags) {} 109 size_t size, gfp_t flags) {}
108static inline void kasan_krealloc(const void *object, size_t new_size, 110static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
114{ 116{
115 return false; 117 return false;
116} 118}
117static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
118 119
119static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } 120static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
120static inline void kasan_free_shadow(const struct vm_struct *vm) {} 121static inline void kasan_free_shadow(const struct vm_struct *vm) {}
121 122
123static inline void kasan_unpoison_slab(const void *ptr) { }
124
122#endif /* CONFIG_KASAN */ 125#endif /* CONFIG_KASAN */
123 126
124#endif /* LINUX_KASAN_H */ 127#endif /* LINUX_KASAN_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 94aa10ffe156..c42082112ec8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -451,6 +451,7 @@ extern int panic_on_oops;
451extern int panic_on_unrecovered_nmi; 451extern int panic_on_unrecovered_nmi;
452extern int panic_on_io_nmi; 452extern int panic_on_io_nmi;
453extern int panic_on_warn; 453extern int panic_on_warn;
454extern int sysctl_panic_on_rcu_stall;
454extern int sysctl_panic_on_stackoverflow; 455extern int sysctl_panic_on_stackoverflow;
455 456
456extern bool crash_kexec_post_notifiers; 457extern bool crash_kexec_post_notifiers;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d2b13066e781..e5e7f2e80a54 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -42,15 +42,16 @@ struct led_classdev {
42#define LED_UNREGISTERING (1 << 1) 42#define LED_UNREGISTERING (1 << 1)
43 /* Upper 16 bits reflect control information */ 43 /* Upper 16 bits reflect control information */
44#define LED_CORE_SUSPENDRESUME (1 << 16) 44#define LED_CORE_SUSPENDRESUME (1 << 16)
45#define LED_BLINK_ONESHOT (1 << 17) 45#define LED_BLINK_SW (1 << 17)
46#define LED_BLINK_ONESHOT_STOP (1 << 18) 46#define LED_BLINK_ONESHOT (1 << 18)
47#define LED_BLINK_INVERT (1 << 19) 47#define LED_BLINK_ONESHOT_STOP (1 << 19)
48#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) 48#define LED_BLINK_INVERT (1 << 20)
49#define LED_BLINK_DISABLE (1 << 21) 49#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
50#define LED_SYSFS_DISABLE (1 << 22) 50#define LED_BLINK_DISABLE (1 << 22)
51#define LED_DEV_CAP_FLASH (1 << 23) 51#define LED_SYSFS_DISABLE (1 << 23)
52#define LED_HW_PLUGGABLE (1 << 24) 52#define LED_DEV_CAP_FLASH (1 << 24)
53#define LED_PANIC_INDICATOR (1 << 25) 53#define LED_HW_PLUGGABLE (1 << 25)
54#define LED_PANIC_INDICATOR (1 << 26)
54 55
55 /* Set LED brightness level 56 /* Set LED brightness level
56 * Must not sleep. Use brightness_set_blocking for drivers 57 * Must not sleep. Use brightness_set_blocking for drivers
@@ -72,8 +73,8 @@ struct led_classdev {
72 * and if both are zero then a sensible default should be chosen. 73 * and if both are zero then a sensible default should be chosen.
73 * The call should adjust the timings in that case and if it can't 74 * The call should adjust the timings in that case and if it can't
74 * match the values specified exactly. 75 * match the values specified exactly.
75 * Deactivate blinking again when the brightness is set to a fixed 76 * Deactivate blinking again when the brightness is set to LED_OFF
76 * value via the brightness_set() callback. 77 * via the brightness_set() callback.
77 */ 78 */
78 int (*blink_set)(struct led_classdev *led_cdev, 79 int (*blink_set)(struct led_classdev *led_cdev,
79 unsigned long *delay_on, 80 unsigned long *delay_on,
diff --git a/include/linux/list.h b/include/linux/list.h
index 5356f4d661a7..5183138aa932 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h)
679} 679}
680 680
681/* 681/*
682 * Check whether the node is the only node of the head without
683 * accessing head:
684 */
685static inline bool
686hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
687{
688 return !n->next && n->pprev == &h->first;
689}
690
691/*
682 * Move a list from one list head to another. Fixup the pprev 692 * Move a list from one list head to another. Fixup the pprev
683 * reference of the first entry if it exists. 693 * reference of the first entry if it exists.
684 */ 694 */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a805474df4ab..56e6069d2452 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -97,6 +97,11 @@ enum mem_cgroup_events_target {
97#define MEM_CGROUP_ID_SHIFT 16 97#define MEM_CGROUP_ID_SHIFT 16
98#define MEM_CGROUP_ID_MAX USHRT_MAX 98#define MEM_CGROUP_ID_MAX USHRT_MAX
99 99
100struct mem_cgroup_id {
101 int id;
102 atomic_t ref;
103};
104
100struct mem_cgroup_stat_cpu { 105struct mem_cgroup_stat_cpu {
101 long count[MEMCG_NR_STAT]; 106 long count[MEMCG_NR_STAT];
102 unsigned long events[MEMCG_NR_EVENTS]; 107 unsigned long events[MEMCG_NR_EVENTS];
@@ -172,6 +177,9 @@ enum memcg_kmem_state {
172struct mem_cgroup { 177struct mem_cgroup {
173 struct cgroup_subsys_state css; 178 struct cgroup_subsys_state css;
174 179
180 /* Private memcg ID. Used to ID objects that outlive the cgroup */
181 struct mem_cgroup_id id;
182
175 /* Accounted resources */ 183 /* Accounted resources */
176 struct page_counter memory; 184 struct page_counter memory;
177 struct page_counter swap; 185 struct page_counter swap;
@@ -330,22 +338,9 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
330 if (mem_cgroup_disabled()) 338 if (mem_cgroup_disabled())
331 return 0; 339 return 0;
332 340
333 return memcg->css.id; 341 return memcg->id.id;
334}
335
336/**
337 * mem_cgroup_from_id - look up a memcg from an id
338 * @id: the id to look up
339 *
340 * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
341 */
342static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
343{
344 struct cgroup_subsys_state *css;
345
346 css = css_from_id(id, &memory_cgrp_subsys);
347 return mem_cgroup_from_css(css);
348} 342}
343struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
349 344
350/** 345/**
351 * parent_mem_cgroup - find the accounting parent of a memcg 346 * parent_mem_cgroup - find the accounting parent of a memcg
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index c18a4c19d6fc..ce9230af09c2 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -171,7 +171,7 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
171static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, 171static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
172 unsigned reg_cnt, unsigned char *val) 172 unsigned reg_cnt, unsigned char *val)
173{ 173{
174 int ret; 174 int ret = 0;
175 int i; 175 int i;
176 176
177 for (i = 0; i < reg_cnt; i++) { 177 for (i = 0; i < reg_cnt; i++) {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 80dec87a94f8..d46a0e7f144d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -466,6 +466,7 @@ enum {
466enum { 466enum {
467 MLX4_INTERFACE_STATE_UP = 1 << 0, 467 MLX4_INTERFACE_STATE_UP = 1 << 0,
468 MLX4_INTERFACE_STATE_DELETION = 1 << 1, 468 MLX4_INTERFACE_STATE_DELETION = 1 << 1,
469 MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
469}; 470};
470 471
471#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ 472#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 035abdf62cfe..73a48479892d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out {
1240 u8 rsvd[8]; 1240 u8 rsvd[8];
1241}; 1241};
1242 1242
1243#define MLX5_CMD_OP_MAX 0x920
1244
1245enum { 1243enum {
1246 VPORT_STATE_DOWN = 0x0, 1244 VPORT_STATE_DOWN = 0x0,
1247 VPORT_STATE_UP = 0x1, 1245 VPORT_STATE_UP = 0x1,
@@ -1369,6 +1367,12 @@ enum mlx5_cap_type {
1369#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ 1367#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1370 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) 1368 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1371 1369
1370#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
1371 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
1372
1373#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1374 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1375
1372#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ 1376#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1373 MLX5_GET(flow_table_eswitch_cap, \ 1377 MLX5_GET(flow_table_eswitch_cap, \
1374 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1378 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80776d0c52dc..fd72ecf0ce9f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -629,6 +629,7 @@ struct mlx5_cmd_work_ent {
629 void *uout; 629 void *uout;
630 int uout_size; 630 int uout_size;
631 mlx5_cmd_cbk_t callback; 631 mlx5_cmd_cbk_t callback;
632 struct delayed_work cb_timeout_work;
632 void *context; 633 void *context;
633 int idx; 634 int idx;
634 struct completion done; 635 struct completion done;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 9a05cd7e5890..e955a2859009 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -205,7 +205,8 @@ enum {
205 MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, 205 MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
206 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, 206 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
207 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, 207 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
208 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c 208 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
209 MLX5_CMD_OP_MAX
209}; 210};
210 211
211struct mlx5_ifc_flow_table_fields_supported_bits { 212struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits {
500 u8 vport_svlan_insert[0x1]; 501 u8 vport_svlan_insert[0x1];
501 u8 vport_cvlan_insert_if_not_exist[0x1]; 502 u8 vport_cvlan_insert_if_not_exist[0x1];
502 u8 vport_cvlan_insert_overwrite[0x1]; 503 u8 vport_cvlan_insert_overwrite[0x1];
503 u8 reserved_at_5[0x1b]; 504 u8 reserved_at_5[0x19];
505 u8 nic_vport_node_guid_modify[0x1];
506 u8 nic_vport_port_guid_modify[0x1];
504 507
505 u8 reserved_at_20[0x7e0]; 508 u8 reserved_at_20[0x7e0];
506}; 509};
@@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
4583}; 4586};
4584 4587
4585struct mlx5_ifc_modify_nic_vport_field_select_bits { 4588struct mlx5_ifc_modify_nic_vport_field_select_bits {
4586 u8 reserved_at_0[0x19]; 4589 u8 reserved_at_0[0x16];
4590 u8 node_guid[0x1];
4591 u8 port_guid[0x1];
4592 u8 reserved_at_18[0x1];
4587 u8 mtu[0x1]; 4593 u8 mtu[0x1];
4588 u8 change_event[0x1]; 4594 u8 change_event[0x1];
4589 u8 promisc[0x1]; 4595 u8 promisc[0x1];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 64221027bf1f..ab310819ac36 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -172,6 +172,7 @@ enum {
172enum { 172enum {
173 MLX5_FENCE_MODE_NONE = 0 << 5, 173 MLX5_FENCE_MODE_NONE = 0 << 5,
174 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, 174 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
175 MLX5_FENCE_MODE_FENCE = 2 << 5,
175 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, 176 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
176 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, 177 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
177}; 178};
@@ -460,10 +461,9 @@ struct mlx5_core_qp {
460}; 461};
461 462
462struct mlx5_qp_path { 463struct mlx5_qp_path {
463 u8 fl; 464 u8 fl_free_ar;
464 u8 rsvd3; 465 u8 rsvd3;
465 u8 free_ar; 466 __be16 pkey_index;
466 u8 pkey_index;
467 u8 rsvd0; 467 u8 rsvd0;
468 u8 grh_mlid; 468 u8 grh_mlid;
469 __be16 rlid; 469 __be16 rlid;
@@ -560,6 +560,7 @@ struct mlx5_modify_qp_mbox_in {
560 __be32 optparam; 560 __be32 optparam;
561 u8 rsvd0[4]; 561 u8 rsvd0[4];
562 struct mlx5_qp_context ctx; 562 struct mlx5_qp_context ctx;
563 u8 rsvd2[16];
563}; 564};
564 565
565struct mlx5_modify_qp_mbox_out { 566struct mlx5_modify_qp_mbox_out {
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 301da4a5e6bf..6c16c198f680 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
50int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 50int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
51 u64 *system_image_guid); 51 u64 *system_image_guid);
52int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 52int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
53int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
54 u32 vport, u64 node_guid);
53int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, 55int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
54 u16 *qkey_viol_cntr); 56 u16 *qkey_viol_cntr);
55int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, 57int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5df5feb49575..ece042dfe23c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
602} 602}
603 603
604void do_set_pte(struct vm_area_struct *vma, unsigned long address, 604void do_set_pte(struct vm_area_struct *vma, unsigned long address,
605 struct page *page, pte_t *pte, bool write, bool anon, bool old); 605 struct page *page, pte_t *pte, bool write, bool anon);
606#endif 606#endif
607 607
608/* 608/*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ca3e517980a0..917f2b6a0cde 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -594,6 +594,9 @@ struct vm_special_mapping {
594 int (*fault)(const struct vm_special_mapping *sm, 594 int (*fault)(const struct vm_special_mapping *sm,
595 struct vm_area_struct *vma, 595 struct vm_area_struct *vma,
596 struct vm_fault *vmf); 596 struct vm_fault *vmf);
597
598 int (*mremap)(const struct vm_special_mapping *sm,
599 struct vm_area_struct *new_vma);
597}; 600};
598 601
599enum tlb_flush_reason { 602enum tlb_flush_reason {
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 6e4c645e1c0d..ed84c07f6a51 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -657,4 +657,20 @@ struct ulpi_device_id {
657 kernel_ulong_t driver_data; 657 kernel_ulong_t driver_data;
658}; 658};
659 659
660/**
661 * struct fsl_mc_device_id - MC object device identifier
662 * @vendor: vendor ID
663 * @obj_type: MC object type
664 * @ver_major: MC object version major number
665 * @ver_minor: MC object version minor number
666 *
667 * Type of entries in the "device Id" table for MC object devices supported by
668 * a MC object device driver. The last entry of the table has vendor set to 0x0
669 */
670struct fsl_mc_device_id {
671 __u16 vendor;
672 const char obj_type[16];
673};
674
675
660#endif /* LINUX_MOD_DEVICETABLE_H */ 676#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8b425c66305a..4f0bfe5912b2 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -47,6 +47,7 @@ struct fsl_mc_msi_desc {
47 * @nvec_used: The number of vectors used 47 * @nvec_used: The number of vectors used
48 * @dev: Pointer to the device which uses this descriptor 48 * @dev: Pointer to the device which uses this descriptor
49 * @msg: The last set MSI message cached for reuse 49 * @msg: The last set MSI message cached for reuse
50 * @affinity: Optional pointer to a cpu affinity mask for this descriptor
50 * 51 *
51 * @masked: [PCI MSI/X] Mask bits 52 * @masked: [PCI MSI/X] Mask bits
52 * @is_msix: [PCI MSI/X] True if MSI-X 53 * @is_msix: [PCI MSI/X] True if MSI-X
@@ -67,6 +68,7 @@ struct msi_desc {
67 unsigned int nvec_used; 68 unsigned int nvec_used;
68 struct device *dev; 69 struct device *dev;
69 struct msi_msg msg; 70 struct msi_msg msg;
71 const struct cpumask *affinity;
70 72
71 union { 73 union {
72 /* PCI MSI/X specific data */ 74 /* PCI MSI/X specific data */
@@ -264,12 +266,10 @@ enum {
264 * callbacks. 266 * callbacks.
265 */ 267 */
266 MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), 268 MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
267 /* Build identity map between hwirq and irq */
268 MSI_FLAG_IDENTITY_MAP = (1 << 2),
269 /* Support multiple PCI MSI interrupts */ 269 /* Support multiple PCI MSI interrupts */
270 MSI_FLAG_MULTI_PCI_MSI = (1 << 3), 270 MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
271 /* Support PCI MSIX interrupts */ 271 /* Support PCI MSIX interrupts */
272 MSI_FLAG_PCI_MSIX = (1 << 4), 272 MSI_FLAG_PCI_MSIX = (1 << 3),
273}; 273};
274 274
275int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, 275int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/linux/namei.h b/include/linux/namei.h
index ec5ec2818a28..d3d0398f2a1b 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
45#define LOOKUP_ROOT 0x2000 45#define LOOKUP_ROOT 0x2000
46#define LOOKUP_EMPTY 0x4000 46#define LOOKUP_EMPTY 0x4000
47 47
48extern int path_pts(struct path *path);
49
48extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); 50extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
49 51
50static inline int user_path_at(int dfd, const char __user *name, unsigned flags, 52static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
diff --git a/include/linux/net.h b/include/linux/net.h
index 9aa49a05fe38..25aa03b51c4e 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -251,7 +251,8 @@ do { \
251 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ 251 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
252 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ 252 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
253 net_ratelimit()) \ 253 net_ratelimit()) \
254 __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ 254 __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
255 ##__VA_ARGS__); \
255} while (0) 256} while (0)
256#elif defined(DEBUG) 257#elif defined(DEBUG)
257#define net_dbg_ratelimited(fmt, ...) \ 258#define net_dbg_ratelimited(fmt, ...) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f45929ce8157..da4b33bea982 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4145,6 +4145,13 @@ static inline void netif_keep_dst(struct net_device *dev)
4145 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); 4145 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4146} 4146}
4147 4147
4148/* return true if dev can't cope with mtu frames that need vlan tag insertion */
4149static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4150{
4151 /* TODO: reserve and use an additional IFF bit, if we get more users */
4152 return dev->priv_flags & IFF_MACSEC;
4153}
4154
4148extern struct pernet_operations __net_initdata loopback_net_ops; 4155extern struct pernet_operations __net_initdata loopback_net_ops;
4149 4156
4150/* Logging, debugging and troubleshooting/diagnostic helpers. */ 4157/* Logging, debugging and troubleshooting/diagnostic helpers. */
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 9bb77d3ed6e0..c2256d746543 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
74{ 74{
75} 75}
76 76
77static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) 77static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
78{ 78{
79 return ERR_PTR(-ENOSYS); 79 return ERR_PTR(-ENOSYS);
80} 80}
diff --git a/include/linux/of.h b/include/linux/of.h
index c7292e8ea080..15c43f076b23 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -614,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np,
614 return NULL; 614 return NULL;
615} 615}
616 616
617static inline int of_parse_phandle_with_args(struct device_node *np, 617static inline int of_parse_phandle_with_args(const struct device_node *np,
618 const char *list_name, 618 const char *list_name,
619 const char *cells_name, 619 const char *cells_name,
620 int index, 620 int index,
@@ -1009,10 +1009,13 @@ static inline int of_get_available_child_count(const struct device_node *np)
1009#endif 1009#endif
1010 1010
1011typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); 1011typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
1012typedef int (*of_init_fn_1_ret)(struct device_node *);
1012typedef void (*of_init_fn_1)(struct device_node *); 1013typedef void (*of_init_fn_1)(struct device_node *);
1013 1014
1014#define OF_DECLARE_1(table, name, compat, fn) \ 1015#define OF_DECLARE_1(table, name, compat, fn) \
1015 _OF_DECLARE(table, name, compat, fn, of_init_fn_1) 1016 _OF_DECLARE(table, name, compat, fn, of_init_fn_1)
1017#define OF_DECLARE_1_RET(table, name, compat, fn) \
1018 _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret)
1016#define OF_DECLARE_2(table, name, compat, fn) \ 1019#define OF_DECLARE_2(table, name, compat, fn) \
1017 _OF_DECLARE(table, name, compat, fn, of_init_fn_2) 1020 _OF_DECLARE(table, name, compat, fn, of_init_fn_2)
1018 1021
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f6e9e85164e8..b969e9443962 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -8,7 +8,7 @@ struct pci_dev;
8struct of_phandle_args; 8struct of_phandle_args;
9struct device_node; 9struct device_node;
10 10
11#ifdef CONFIG_OF 11#ifdef CONFIG_OF_PCI
12int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); 12int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
13struct device_node *of_pci_find_child_device(struct device_node *parent, 13struct device_node *of_pci_find_child_device(struct device_node *parent,
14 unsigned int devfn); 14 unsigned int devfn);
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index ad2f67054372..c201060e0c6d 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -31,6 +31,13 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
31int of_reserved_mem_device_init(struct device *dev); 31int of_reserved_mem_device_init(struct device *dev);
32void of_reserved_mem_device_release(struct device *dev); 32void of_reserved_mem_device_release(struct device *dev);
33 33
34int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
35 phys_addr_t align,
36 phys_addr_t start,
37 phys_addr_t end,
38 bool nomap,
39 phys_addr_t *res_base);
40
34void fdt_init_reserved_mem(void); 41void fdt_init_reserved_mem(void);
35void fdt_reserved_mem_save_node(unsigned long node, const char *uname, 42void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
36 phys_addr_t base, phys_addr_t size); 43 phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
46 46
47static inline bool page_is_young(struct page *page) 47static inline bool page_is_young(struct page *page)
48{ 48{
49 return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 49 struct page_ext *page_ext = lookup_page_ext(page);
50
51 if (unlikely(!page_ext))
52 return false;
53
54 return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
50} 55}
51 56
52static inline void set_page_young(struct page *page) 57static inline void set_page_young(struct page *page)
53{ 58{
54 set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 59 struct page_ext *page_ext = lookup_page_ext(page);
60
61 if (unlikely(!page_ext))
62 return;
63
64 set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
55} 65}
56 66
57static inline bool test_and_clear_page_young(struct page *page) 67static inline bool test_and_clear_page_young(struct page *page)
58{ 68{
59 return test_and_clear_bit(PAGE_EXT_YOUNG, 69 struct page_ext *page_ext = lookup_page_ext(page);
60 &lookup_page_ext(page)->flags); 70
71 if (unlikely(!page_ext))
72 return false;
73
74 return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
61} 75}
62 76
63static inline bool page_is_idle(struct page *page) 77static inline bool page_is_idle(struct page *page)
64{ 78{
65 return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 79 struct page_ext *page_ext = lookup_page_ext(page);
80
81 if (unlikely(!page_ext))
82 return false;
83
84 return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
66} 85}
67 86
68static inline void set_page_idle(struct page *page) 87static inline void set_page_idle(struct page *page)
69{ 88{
70 set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 89 struct page_ext *page_ext = lookup_page_ext(page);
90
91 if (unlikely(!page_ext))
92 return;
93
94 set_bit(PAGE_EXT_IDLE, &page_ext->flags);
71} 95}
72 96
73static inline void clear_page_idle(struct page *page) 97static inline void clear_page_idle(struct page *page)
74{ 98{
75 clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 99 struct page_ext *page_ext = lookup_page_ext(page);
100
101 if (unlikely(!page_ext))
102 return;
103
104 clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
76} 105}
77#endif /* CONFIG_64BIT */ 106#endif /* CONFIG_64BIT */
78 107
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 84f542df7ff5..1c7eec09e5eb 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -136,14 +136,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
136 * used as a pointer. If the compiler generates a separate fetch 136 * used as a pointer. If the compiler generates a separate fetch
137 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in 137 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
138 * between contaminating the pointer value, meaning that 138 * between contaminating the pointer value, meaning that
139 * ACCESS_ONCE() is required when fetching it. 139 * READ_ONCE() is required when fetching it.
140 *
141 * Also, we need a data dependency barrier to be paired with
142 * smp_store_release() in __percpu_ref_switch_to_percpu().
143 *
144 * Use lockless deref which contains both.
145 */ 140 */
146 percpu_ptr = lockless_dereference(ref->percpu_count_ptr); 141 percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
142
143 /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
144 smp_read_barrier_depends();
147 145
148 /* 146 /*
149 * Theoretically, the following could test just ATOMIC; however, 147 * Theoretically, the following could test just ATOMIC; however,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a827cecd62f..7921f4f20a58 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -517,6 +517,11 @@ struct swevent_hlist {
517struct perf_cgroup; 517struct perf_cgroup;
518struct ring_buffer; 518struct ring_buffer;
519 519
520struct pmu_event_list {
521 raw_spinlock_t lock;
522 struct list_head list;
523};
524
520/** 525/**
521 * struct perf_event - performance event kernel representation: 526 * struct perf_event - performance event kernel representation:
522 */ 527 */
@@ -675,6 +680,7 @@ struct perf_event {
675 int cgrp_defer_enabled; 680 int cgrp_defer_enabled;
676#endif 681#endif
677 682
683 struct list_head sb_list;
678#endif /* CONFIG_PERF_EVENTS */ 684#endif /* CONFIG_PERF_EVENTS */
679}; 685};
680 686
@@ -1074,7 +1080,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
1074extern struct perf_callchain_entry * 1080extern struct perf_callchain_entry *
1075get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, 1081get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1076 u32 max_stack, bool crosstask, bool add_mark); 1082 u32 max_stack, bool crosstask, bool add_mark);
1077extern int get_callchain_buffers(void); 1083extern int get_callchain_buffers(int max_stack);
1078extern void put_callchain_buffers(void); 1084extern void put_callchain_buffers(void);
1079 1085
1080extern int sysctl_perf_event_max_stack; 1086extern int sysctl_perf_event_max_stack;
@@ -1326,6 +1332,13 @@ struct perf_pmu_events_attr {
1326 const char *event_str; 1332 const char *event_str;
1327}; 1333};
1328 1334
1335struct perf_pmu_events_ht_attr {
1336 struct device_attribute attr;
1337 u64 id;
1338 const char *event_str_ht;
1339 const char *event_str_noht;
1340};
1341
1329ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, 1342ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1330 char *page); 1343 char *page);
1331 1344
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index a810f2a18842..f08b67238b58 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -22,12 +22,20 @@
22 22
23struct phy; 23struct phy;
24 24
25enum phy_mode {
26 PHY_MODE_INVALID,
27 PHY_MODE_USB_HOST,
28 PHY_MODE_USB_DEVICE,
29 PHY_MODE_USB_OTG,
30};
31
25/** 32/**
26 * struct phy_ops - set of function pointers for performing phy operations 33 * struct phy_ops - set of function pointers for performing phy operations
27 * @init: operation to be performed for initializing phy 34 * @init: operation to be performed for initializing phy
28 * @exit: operation to be performed while exiting 35 * @exit: operation to be performed while exiting
29 * @power_on: powering on the phy 36 * @power_on: powering on the phy
30 * @power_off: powering off the phy 37 * @power_off: powering off the phy
38 * @set_mode: set the mode of the phy
31 * @owner: the module owner containing the ops 39 * @owner: the module owner containing the ops
32 */ 40 */
33struct phy_ops { 41struct phy_ops {
@@ -35,6 +43,7 @@ struct phy_ops {
35 int (*exit)(struct phy *phy); 43 int (*exit)(struct phy *phy);
36 int (*power_on)(struct phy *phy); 44 int (*power_on)(struct phy *phy);
37 int (*power_off)(struct phy *phy); 45 int (*power_off)(struct phy *phy);
46 int (*set_mode)(struct phy *phy, enum phy_mode mode);
38 struct module *owner; 47 struct module *owner;
39}; 48};
40 49
@@ -126,6 +135,7 @@ int phy_init(struct phy *phy);
126int phy_exit(struct phy *phy); 135int phy_exit(struct phy *phy);
127int phy_power_on(struct phy *phy); 136int phy_power_on(struct phy *phy);
128int phy_power_off(struct phy *phy); 137int phy_power_off(struct phy *phy);
138int phy_set_mode(struct phy *phy, enum phy_mode mode);
129static inline int phy_get_bus_width(struct phy *phy) 139static inline int phy_get_bus_width(struct phy *phy)
130{ 140{
131 return phy->attrs.bus_width; 141 return phy->attrs.bus_width;
@@ -233,6 +243,13 @@ static inline int phy_power_off(struct phy *phy)
233 return -ENOSYS; 243 return -ENOSYS;
234} 244}
235 245
246static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
247{
248 if (!phy)
249 return 0;
250 return -ENOSYS;
251}
252
236static inline int phy_get_bus_width(struct phy *phy) 253static inline int phy_get_bus_width(struct phy *phy)
237{ 254{
238 return -ENOSYS; 255 return -ENOSYS;
diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h
new file mode 100644
index 000000000000..2e5eea358194
--- /dev/null
+++ b/include/linux/platform_data/sht3x.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2016 Sensirion AG, Switzerland
3 * Author: David Frey <david.frey@sensirion.com>
4 * Author: Pascal Sachs <pascal.sachs@sensirion.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef __SHT3X_H_
19#define __SHT3X_H_
20
21struct sht3x_platform_data {
22 bool blocking_io;
23 bool high_precision;
24};
25#endif /* __SHT3X_H_ */
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 5b5a80cc5926..c818772d9f9d 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -43,10 +43,8 @@ struct posix_acl_entry {
43}; 43};
44 44
45struct posix_acl { 45struct posix_acl {
46 union { 46 atomic_t a_refcount;
47 atomic_t a_refcount; 47 struct rcu_head a_rcu;
48 struct rcu_head a_rcu;
49 };
50 unsigned int a_count; 48 unsigned int a_count;
51 struct posix_acl_entry a_entries[0]; 49 struct posix_acl_entry a_entries[0];
52}; 50};
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f4da695fd615..f136b22c7772 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -108,11 +108,14 @@ struct va_format {
108 * Dummy printk for disabled debugging statements to use whilst maintaining 108 * Dummy printk for disabled debugging statements to use whilst maintaining
109 * gcc's format checking. 109 * gcc's format checking.
110 */ 110 */
111#define no_printk(fmt, ...) \ 111#define no_printk(fmt, ...) \
112do { \ 112({ \
113 if (0) \ 113 do { \
114 printk(fmt, ##__VA_ARGS__); \ 114 if (0) \
115} while (0) 115 printk(fmt, ##__VA_ARGS__); \
116 } while (0); \
117 0; \
118})
116 119
117#ifdef CONFIG_EARLY_PRINTK 120#ifdef CONFIG_EARLY_PRINTK
118extern asmlinkage __printf(1, 2) 121extern asmlinkage __printf(1, 2)
@@ -309,20 +312,24 @@ extern asmlinkage void dump_stack(void) __cold;
309#define printk_once(fmt, ...) \ 312#define printk_once(fmt, ...) \
310({ \ 313({ \
311 static bool __print_once __read_mostly; \ 314 static bool __print_once __read_mostly; \
315 bool __ret_print_once = !__print_once; \
312 \ 316 \
313 if (!__print_once) { \ 317 if (!__print_once) { \
314 __print_once = true; \ 318 __print_once = true; \
315 printk(fmt, ##__VA_ARGS__); \ 319 printk(fmt, ##__VA_ARGS__); \
316 } \ 320 } \
321 unlikely(__ret_print_once); \
317}) 322})
318#define printk_deferred_once(fmt, ...) \ 323#define printk_deferred_once(fmt, ...) \
319({ \ 324({ \
320 static bool __print_once __read_mostly; \ 325 static bool __print_once __read_mostly; \
326 bool __ret_print_once = !__print_once; \
321 \ 327 \
322 if (!__print_once) { \ 328 if (!__print_once) { \
323 __print_once = true; \ 329 __print_once = true; \
324 printk_deferred(fmt, ##__VA_ARGS__); \ 330 printk_deferred(fmt, ##__VA_ARGS__); \
325 } \ 331 } \
332 unlikely(__ret_print_once); \
326}) 333})
327#else 334#else
328#define printk_once(fmt, ...) \ 335#define printk_once(fmt, ...) \
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 17018f3c066e..c038ae36b10e 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -235,6 +235,9 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
235 if (!pwm) 235 if (!pwm)
236 return -EINVAL; 236 return -EINVAL;
237 237
238 if (duty_ns < 0 || period_ns < 0)
239 return -EINVAL;
240
238 pwm_get_state(pwm, &state); 241 pwm_get_state(pwm, &state);
239 if (state.duty_cycle == duty_ns && state.period == period_ns) 242 if (state.duty_cycle == duty_ns && state.period == period_ns)
240 return 0; 243 return 0;
@@ -461,6 +464,8 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm)
461 464
462static inline void pwm_apply_args(struct pwm_device *pwm) 465static inline void pwm_apply_args(struct pwm_device *pwm)
463{ 466{
467 struct pwm_state state = { };
468
464 /* 469 /*
465 * PWM users calling pwm_apply_args() expect to have a fresh config 470 * PWM users calling pwm_apply_args() expect to have a fresh config
466 * where the polarity and period are set according to pwm_args info. 471 * where the polarity and period are set according to pwm_args info.
@@ -473,18 +478,20 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
473 * at startup (even if they are actually enabled), thus authorizing 478 * at startup (even if they are actually enabled), thus authorizing
474 * polarity setting. 479 * polarity setting.
475 * 480 *
476 * Instead of setting ->enabled to false, we call pwm_disable() 481 * To fulfill this requirement, we apply a new state which disables
477 * before pwm_set_polarity() to ensure that everything is configured 482 * the PWM device and set the reference period and polarity config.
478 * as expected, and the PWM is really disabled when the user request
479 * it.
480 * 483 *
481 * Note that PWM users requiring a smooth handover between the 484 * Note that PWM users requiring a smooth handover between the
482 * bootloader and the kernel (like critical regulators controlled by 485 * bootloader and the kernel (like critical regulators controlled by
483 * PWM devices) will have to switch to the atomic API and avoid calling 486 * PWM devices) will have to switch to the atomic API and avoid calling
484 * pwm_apply_args(). 487 * pwm_apply_args().
485 */ 488 */
486 pwm_disable(pwm); 489
487 pwm_set_polarity(pwm, pwm->args.polarity); 490 state.enabled = false;
491 state.polarity = pwm->args.polarity;
492 state.period = pwm->args.period;
493
494 pwm_apply_state(pwm, &state);
488} 495}
489 496
490struct pwm_lookup { 497struct pwm_lookup {
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 6ae8cb4a61d3..6c876a63558d 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -49,6 +49,7 @@ struct qed_start_vport_params {
49 bool drop_ttl0; 49 bool drop_ttl0;
50 u8 vport_id; 50 u8 vport_id;
51 u16 mtu; 51 u16 mtu;
52 bool clear_stats;
52}; 53};
53 54
54struct qed_stop_rxq_params { 55struct qed_stop_rxq_params {
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index cb4b7e8cee81..eca6f626c16e 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -407,6 +407,7 @@ static inline __must_check
407void **radix_tree_iter_retry(struct radix_tree_iter *iter) 407void **radix_tree_iter_retry(struct radix_tree_iter *iter)
408{ 408{
409 iter->next_index = iter->index; 409 iter->next_index = iter->index;
410 iter->tags = 0;
410 return NULL; 411 return NULL;
411} 412}
412 413
diff --git a/include/linux/random.h b/include/linux/random.h
index e47e533742b5..3d6e9815cd85 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -95,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
95#ifdef CONFIG_ARCH_RANDOM 95#ifdef CONFIG_ARCH_RANDOM
96# include <asm/archrandom.h> 96# include <asm/archrandom.h>
97#else 97#else
98static inline int arch_get_random_long(unsigned long *v) 98static inline bool arch_get_random_long(unsigned long *v)
99{ 99{
100 return 0; 100 return 0;
101} 101}
102static inline int arch_get_random_int(unsigned int *v) 102static inline bool arch_get_random_int(unsigned int *v)
103{ 103{
104 return 0; 104 return 0;
105} 105}
106static inline int arch_has_random(void) 106static inline bool arch_has_random(void)
107{ 107{
108 return 0; 108 return 0;
109} 109}
110static inline int arch_get_random_seed_long(unsigned long *v) 110static inline bool arch_get_random_seed_long(unsigned long *v)
111{ 111{
112 return 0; 112 return 0;
113} 113}
114static inline int arch_get_random_seed_int(unsigned int *v) 114static inline bool arch_get_random_seed_int(unsigned int *v)
115{ 115{
116 return 0; 116 return 0;
117} 117}
118static inline int arch_has_random_seed(void) 118static inline bool arch_has_random_seed(void)
119{ 119{
120 return 0; 120 return 0;
121} 121}
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5f1533e3d032..3bc5de08c0b7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -45,6 +45,7 @@
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/ktime.h> 47#include <linux/ktime.h>
48#include <linux/irqflags.h>
48 49
49#include <asm/barrier.h> 50#include <asm/barrier.h>
50 51
@@ -379,12 +380,13 @@ static inline void rcu_init_nohz(void)
379 * in the inner idle loop. 380 * in the inner idle loop.
380 * 381 *
381 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) 382 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
382 * will tell RCU that it needs to pay attending, invoke its argument 383 * will tell RCU that it needs to pay attention, invoke its argument
383 * (in this example, a call to the do_something_with_RCU() function), 384 * (in this example, calling the do_something_with_RCU() function),
384 * and then tell RCU to go back to ignoring this CPU. It is permissible 385 * and then tell RCU to go back to ignoring this CPU. It is permissible
385 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently 386 * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
386 * quite limited. If deeper nesting is required, it will be necessary 387 * on the order of a million or so, even on 32-bit systems). It is
387 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly. 388 * not legal to block within RCU_NONIDLE(), nor is it permissible to
389 * transfer control either into or out of RCU_NONIDLE()'s statement.
388 */ 390 */
389#define RCU_NONIDLE(a) \ 391#define RCU_NONIDLE(a) \
390 do { \ 392 do { \
@@ -649,7 +651,16 @@ static inline void rcu_preempt_sleep_check(void)
649 * please be careful when making changes to rcu_assign_pointer() and the 651 * please be careful when making changes to rcu_assign_pointer() and the
650 * other macros that it invokes. 652 * other macros that it invokes.
651 */ 653 */
652#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v)) 654#define rcu_assign_pointer(p, v) \
655({ \
656 uintptr_t _r_a_p__v = (uintptr_t)(v); \
657 \
658 if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
659 WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
660 else \
661 smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
662 _r_a_p__v; \
663})
653 664
654/** 665/**
655 * rcu_access_pointer() - fetch RCU pointer with no dereferencing 666 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 49d057655d62..b0f305e77b7f 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class;
49extern struct lock_class_key reservation_seqcount_class; 49extern struct lock_class_key reservation_seqcount_class;
50extern const char reservation_seqcount_string[]; 50extern const char reservation_seqcount_string[];
51 51
52/**
53 * struct reservation_object_list - a list of shared fences
54 * @rcu: for internal use
55 * @shared_count: table of shared fences
56 * @shared_max: for growing shared fence table
57 * @shared: shared fence table
58 */
52struct reservation_object_list { 59struct reservation_object_list {
53 struct rcu_head rcu; 60 struct rcu_head rcu;
54 u32 shared_count, shared_max; 61 u32 shared_count, shared_max;
55 struct fence __rcu *shared[]; 62 struct fence __rcu *shared[];
56}; 63};
57 64
65/**
66 * struct reservation_object - a reservation object manages fences for a buffer
67 * @lock: update side lock
68 * @seq: sequence count for managing RCU read-side synchronization
69 * @fence_excl: the exclusive fence, if there is one currently
70 * @fence: list of current shared fences
71 * @staged: staged copy of shared fences for RCU updates
72 */
58struct reservation_object { 73struct reservation_object {
59 struct ww_mutex lock; 74 struct ww_mutex lock;
60 seqcount_t seq; 75 seqcount_t seq;
@@ -68,6 +83,10 @@ struct reservation_object {
68#define reservation_object_assert_held(obj) \ 83#define reservation_object_assert_held(obj) \
69 lockdep_assert_held(&(obj)->lock.base) 84 lockdep_assert_held(&(obj)->lock.base)
70 85
86/**
87 * reservation_object_init - initialize a reservation object
88 * @obj: the reservation object
89 */
71static inline void 90static inline void
72reservation_object_init(struct reservation_object *obj) 91reservation_object_init(struct reservation_object *obj)
73{ 92{
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj)
79 obj->staged = NULL; 98 obj->staged = NULL;
80} 99}
81 100
101/**
102 * reservation_object_fini - destroys a reservation object
103 * @obj: the reservation object
104 */
82static inline void 105static inline void
83reservation_object_fini(struct reservation_object *obj) 106reservation_object_fini(struct reservation_object *obj)
84{ 107{
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj)
106 ww_mutex_destroy(&obj->lock); 129 ww_mutex_destroy(&obj->lock);
107} 130}
108 131
132/**
133 * reservation_object_get_list - get the reservation object's
134 * shared fence list, with update-side lock held
135 * @obj: the reservation object
136 *
137 * Returns the shared fence list. Does NOT take references to
138 * the fence. The obj->lock must be held.
139 */
109static inline struct reservation_object_list * 140static inline struct reservation_object_list *
110reservation_object_get_list(struct reservation_object *obj) 141reservation_object_get_list(struct reservation_object *obj)
111{ 142{
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj)
113 reservation_object_held(obj)); 144 reservation_object_held(obj));
114} 145}
115 146
147/**
148 * reservation_object_get_excl - get the reservation object's
149 * exclusive fence, with update-side lock held
150 * @obj: the reservation object
151 *
152 * Returns the exclusive fence (if any). Does NOT take a
153 * reference. The obj->lock must be held.
154 *
155 * RETURNS
156 * The exclusive fence or NULL
157 */
116static inline struct fence * 158static inline struct fence *
117reservation_object_get_excl(struct reservation_object *obj) 159reservation_object_get_excl(struct reservation_object *obj)
118{ 160{
@@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj)
120 reservation_object_held(obj)); 162 reservation_object_held(obj));
121} 163}
122 164
165/**
166 * reservation_object_get_excl_rcu - get the reservation object's
167 * exclusive fence, without lock held.
168 * @obj: the reservation object
169 *
170 * If there is an exclusive fence, this atomically increments it's
171 * reference count and returns it.
172 *
173 * RETURNS
174 * The exclusive fence or NULL if none
175 */
123static inline struct fence * 176static inline struct fence *
124reservation_object_get_excl_rcu(struct reservation_object *obj) 177reservation_object_get_excl_rcu(struct reservation_object *obj)
125{ 178{
diff --git a/include/linux/reset.h b/include/linux/reset.h
index ec0306ce7b92..45a4abeb6acb 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -84,8 +84,8 @@ static inline struct reset_control *__devm_reset_control_get(
84#endif /* CONFIG_RESET_CONTROLLER */ 84#endif /* CONFIG_RESET_CONTROLLER */
85 85
86/** 86/**
87 * reset_control_get - Lookup and obtain an exclusive reference to a 87 * reset_control_get_exclusive - Lookup and obtain an exclusive reference
88 * reset controller. 88 * to a reset controller.
89 * @dev: device to be reset by the controller 89 * @dev: device to be reset by the controller
90 * @id: reset line name 90 * @id: reset line name
91 * 91 *
@@ -98,8 +98,8 @@ static inline struct reset_control *__devm_reset_control_get(
98 * 98 *
99 * Use of id names is optional. 99 * Use of id names is optional.
100 */ 100 */
101static inline struct reset_control *__must_check reset_control_get( 101static inline struct reset_control *
102 struct device *dev, const char *id) 102__must_check reset_control_get_exclusive(struct device *dev, const char *id)
103{ 103{
104#ifndef CONFIG_RESET_CONTROLLER 104#ifndef CONFIG_RESET_CONTROLLER
105 WARN_ON(1); 105 WARN_ON(1);
@@ -107,12 +107,6 @@ static inline struct reset_control *__must_check reset_control_get(
107 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); 107 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
108} 108}
109 109
110static inline struct reset_control *reset_control_get_optional(
111 struct device *dev, const char *id)
112{
113 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
114}
115
116/** 110/**
117 * reset_control_get_shared - Lookup and obtain a shared reference to a 111 * reset_control_get_shared - Lookup and obtain a shared reference to a
118 * reset controller. 112 * reset controller.
@@ -141,9 +135,21 @@ static inline struct reset_control *reset_control_get_shared(
141 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); 135 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
142} 136}
143 137
138static inline struct reset_control *reset_control_get_optional_exclusive(
139 struct device *dev, const char *id)
140{
141 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
142}
143
144static inline struct reset_control *reset_control_get_optional_shared(
145 struct device *dev, const char *id)
146{
147 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
148}
149
144/** 150/**
145 * of_reset_control_get - Lookup and obtain an exclusive reference to a 151 * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
146 * reset controller. 152 * to a reset controller.
147 * @node: device to be reset by the controller 153 * @node: device to be reset by the controller
148 * @id: reset line name 154 * @id: reset line name
149 * 155 *
@@ -151,15 +157,41 @@ static inline struct reset_control *reset_control_get_shared(
151 * 157 *
152 * Use of id names is optional. 158 * Use of id names is optional.
153 */ 159 */
154static inline struct reset_control *of_reset_control_get( 160static inline struct reset_control *of_reset_control_get_exclusive(
155 struct device_node *node, const char *id) 161 struct device_node *node, const char *id)
156{ 162{
157 return __of_reset_control_get(node, id, 0, 0); 163 return __of_reset_control_get(node, id, 0, 0);
158} 164}
159 165
160/** 166/**
161 * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to 167 * of_reset_control_get_shared - Lookup and obtain an shared reference
162 * a reset controller by index. 168 * to a reset controller.
169 * @node: device to be reset by the controller
170 * @id: reset line name
171 *
172 * When a reset-control is shared, the behavior of reset_control_assert /
173 * deassert is changed, the reset-core will keep track of a deassert_count
174 * and only (re-)assert the reset after reset_control_assert has been called
175 * as many times as reset_control_deassert was called. Also see the remark
176 * about shared reset-controls in the reset_control_assert docs.
177 *
178 * Calling reset_control_assert without first calling reset_control_deassert
179 * is not allowed on a shared reset control. Calling reset_control_reset is
180 * also not allowed on a shared reset control.
181 * Returns a struct reset_control or IS_ERR() condition containing errno.
182 *
183 * Use of id names is optional.
184 */
185static inline struct reset_control *of_reset_control_get_shared(
186 struct device_node *node, const char *id)
187{
188 return __of_reset_control_get(node, id, 0, 1);
189}
190
191/**
192 * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive
193 * reference to a reset controller
194 * by index.
163 * @node: device to be reset by the controller 195 * @node: device to be reset by the controller
164 * @index: index of the reset controller 196 * @index: index of the reset controller
165 * 197 *
@@ -167,49 +199,60 @@ static inline struct reset_control *of_reset_control_get(
167 * in whatever order. Returns a struct reset_control or IS_ERR() condition 199 * in whatever order. Returns a struct reset_control or IS_ERR() condition
168 * containing errno. 200 * containing errno.
169 */ 201 */
170static inline struct reset_control *of_reset_control_get_by_index( 202static inline struct reset_control *of_reset_control_get_exclusive_by_index(
171 struct device_node *node, int index) 203 struct device_node *node, int index)
172{ 204{
173 return __of_reset_control_get(node, NULL, index, 0); 205 return __of_reset_control_get(node, NULL, index, 0);
174} 206}
175 207
176/** 208/**
177 * devm_reset_control_get - resource managed reset_control_get() 209 * of_reset_control_get_shared_by_index - Lookup and obtain an shared
178 * @dev: device to be reset by the controller 210 * reference to a reset controller
179 * @id: reset line name 211 * by index.
212 * @node: device to be reset by the controller
213 * @index: index of the reset controller
214 *
215 * When a reset-control is shared, the behavior of reset_control_assert /
216 * deassert is changed, the reset-core will keep track of a deassert_count
217 * and only (re-)assert the reset after reset_control_assert has been called
218 * as many times as reset_control_deassert was called. Also see the remark
219 * about shared reset-controls in the reset_control_assert docs.
220 *
221 * Calling reset_control_assert without first calling reset_control_deassert
222 * is not allowed on a shared reset control. Calling reset_control_reset is
223 * also not allowed on a shared reset control.
224 * Returns a struct reset_control or IS_ERR() condition containing errno.
180 * 225 *
181 * Managed reset_control_get(). For reset controllers returned from this 226 * This is to be used to perform a list of resets for a device or power domain
182 * function, reset_control_put() is called automatically on driver detach. 227 * in whatever order. Returns a struct reset_control or IS_ERR() condition
183 * See reset_control_get() for more information. 228 * containing errno.
184 */ 229 */
185static inline struct reset_control *__must_check devm_reset_control_get( 230static inline struct reset_control *of_reset_control_get_shared_by_index(
186 struct device *dev, const char *id) 231 struct device_node *node, int index)
187{
188#ifndef CONFIG_RESET_CONTROLLER
189 WARN_ON(1);
190#endif
191 return __devm_reset_control_get(dev, id, 0, 0);
192}
193
194static inline struct reset_control *devm_reset_control_get_optional(
195 struct device *dev, const char *id)
196{ 232{
197 return __devm_reset_control_get(dev, id, 0, 0); 233 return __of_reset_control_get(node, NULL, index, 1);
198} 234}
199 235
200/** 236/**
201 * devm_reset_control_get_by_index - resource managed reset_control_get 237 * devm_reset_control_get_exclusive - resource managed
238 * reset_control_get_exclusive()
202 * @dev: device to be reset by the controller 239 * @dev: device to be reset by the controller
203 * @index: index of the reset controller 240 * @id: reset line name
204 * 241 *
205 * Managed reset_control_get(). For reset controllers returned from this 242 * Managed reset_control_get_exclusive(). For reset controllers returned
206 * function, reset_control_put() is called automatically on driver detach. 243 * from this function, reset_control_put() is called automatically on driver
207 * See reset_control_get() for more information. 244 * detach.
245 *
246 * See reset_control_get_exclusive() for more information.
208 */ 247 */
209static inline struct reset_control *devm_reset_control_get_by_index( 248static inline struct reset_control *
210 struct device *dev, int index) 249__must_check devm_reset_control_get_exclusive(struct device *dev,
250 const char *id)
211{ 251{
212 return __devm_reset_control_get(dev, NULL, index, 0); 252#ifndef CONFIG_RESET_CONTROLLER
253 WARN_ON(1);
254#endif
255 return __devm_reset_control_get(dev, id, 0, 0);
213} 256}
214 257
215/** 258/**
@@ -227,6 +270,36 @@ static inline struct reset_control *devm_reset_control_get_shared(
227 return __devm_reset_control_get(dev, id, 0, 1); 270 return __devm_reset_control_get(dev, id, 0, 1);
228} 271}
229 272
273static inline struct reset_control *devm_reset_control_get_optional_exclusive(
274 struct device *dev, const char *id)
275{
276 return __devm_reset_control_get(dev, id, 0, 0);
277}
278
279static inline struct reset_control *devm_reset_control_get_optional_shared(
280 struct device *dev, const char *id)
281{
282 return __devm_reset_control_get(dev, id, 0, 1);
283}
284
285/**
286 * devm_reset_control_get_exclusive_by_index - resource managed
287 * reset_control_get_exclusive()
288 * @dev: device to be reset by the controller
289 * @index: index of the reset controller
290 *
291 * Managed reset_control_get_exclusive(). For reset controllers returned from
292 * this function, reset_control_put() is called automatically on driver
293 * detach.
294 *
295 * See reset_control_get_exclusive() for more information.
296 */
297static inline struct reset_control *
298devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
299{
300 return __devm_reset_control_get(dev, NULL, index, 0);
301}
302
230/** 303/**
231 * devm_reset_control_get_shared_by_index - resource managed 304 * devm_reset_control_get_shared_by_index - resource managed
232 * reset_control_get_shared 305 * reset_control_get_shared
@@ -237,10 +310,60 @@ static inline struct reset_control *devm_reset_control_get_shared(
237 * this function, reset_control_put() is called automatically on driver detach. 310 * this function, reset_control_put() is called automatically on driver detach.
238 * See reset_control_get_shared() for more information. 311 * See reset_control_get_shared() for more information.
239 */ 312 */
240static inline struct reset_control *devm_reset_control_get_shared_by_index( 313static inline struct reset_control *
241 struct device *dev, int index) 314devm_reset_control_get_shared_by_index(struct device *dev, int index)
242{ 315{
243 return __devm_reset_control_get(dev, NULL, index, 1); 316 return __devm_reset_control_get(dev, NULL, index, 1);
244} 317}
245 318
319/*
320 * TEMPORARY calls to use during transition:
321 *
322 * of_reset_control_get() => of_reset_control_get_exclusive()
323 *
324 * These inline function calls will be removed once all consumers
325 * have been moved over to the new explicit API.
326 */
327static inline struct reset_control *reset_control_get(
328 struct device *dev, const char *id)
329{
330 return reset_control_get_exclusive(dev, id);
331}
332
333static inline struct reset_control *reset_control_get_optional(
334 struct device *dev, const char *id)
335{
336 return reset_control_get_optional_exclusive(dev, id);
337}
338
339static inline struct reset_control *of_reset_control_get(
340 struct device_node *node, const char *id)
341{
342 return of_reset_control_get_exclusive(node, id);
343}
344
345static inline struct reset_control *of_reset_control_get_by_index(
346 struct device_node *node, int index)
347{
348 return of_reset_control_get_exclusive_by_index(node, index);
349}
350
351static inline struct reset_control *devm_reset_control_get(
352 struct device *dev, const char *id)
353{
354 return devm_reset_control_get_exclusive(dev, id);
355}
356
357static inline struct reset_control *devm_reset_control_get_optional(
358 struct device *dev, const char *id)
359{
360 return devm_reset_control_get_optional_exclusive(dev, id);
361
362}
363
364static inline struct reset_control *devm_reset_control_get_by_index(
365 struct device *dev, int index)
366{
367 return devm_reset_control_get_exclusive_by_index(dev, index);
368}
246#endif 369#endif
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 49eb4f8ebac9..2b0fad83683f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page);
158/* 158/*
159 * rmap interfaces called when adding or removing pte of page 159 * rmap interfaces called when adding or removing pte of page
160 */ 160 */
161void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); 161void page_move_anon_rmap(struct page *, struct vm_area_struct *);
162void page_add_anon_rmap(struct page *, struct vm_area_struct *, 162void page_add_anon_rmap(struct page *, struct vm_area_struct *,
163 unsigned long, bool); 163 unsigned long, bool);
164void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, 164void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index d37fbb34d06f..dd1d14250340 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -23,10 +23,11 @@ struct rw_semaphore;
23 23
24#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK 24#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
25#include <linux/rwsem-spinlock.h> /* use a generic implementation */ 25#include <linux/rwsem-spinlock.h> /* use a generic implementation */
26#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE
26#else 27#else
27/* All arch specific implementations share the same struct */ 28/* All arch specific implementations share the same struct */
28struct rw_semaphore { 29struct rw_semaphore {
29 long count; 30 atomic_long_t count;
30 struct list_head wait_list; 31 struct list_head wait_list;
31 raw_spinlock_t wait_lock; 32 raw_spinlock_t wait_lock;
32#ifdef CONFIG_RWSEM_SPIN_ON_OWNER 33#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -54,9 +55,10 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
54/* In all implementations count != 0 means locked */ 55/* In all implementations count != 0 means locked */
55static inline int rwsem_is_locked(struct rw_semaphore *sem) 56static inline int rwsem_is_locked(struct rw_semaphore *sem)
56{ 57{
57 return sem->count != 0; 58 return atomic_long_read(&sem->count) != 0;
58} 59}
59 60
61#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
60#endif 62#endif
61 63
62/* Common initializer macros and functions */ 64/* Common initializer macros and functions */
@@ -74,7 +76,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
74#endif 76#endif
75 77
76#define __RWSEM_INITIALIZER(name) \ 78#define __RWSEM_INITIALIZER(name) \
77 { .count = RWSEM_UNLOCKED_VALUE, \ 79 { __RWSEM_INIT_COUNT(name), \
78 .wait_list = LIST_HEAD_INIT((name).wait_list), \ 80 .wait_list = LIST_HEAD_INIT((name).wait_list), \
79 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ 81 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
80 __RWSEM_OPT_INIT(name) \ 82 __RWSEM_OPT_INIT(name) \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e42ada26345..d99218a1e043 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -219,9 +219,10 @@ extern void proc_sched_set_task(struct task_struct *p);
219#define TASK_WAKING 256 219#define TASK_WAKING 256
220#define TASK_PARKED 512 220#define TASK_PARKED 512
221#define TASK_NOLOAD 1024 221#define TASK_NOLOAD 1024
222#define TASK_STATE_MAX 2048 222#define TASK_NEW 2048
223#define TASK_STATE_MAX 4096
223 224
224#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN" 225#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
225 226
226extern char ___assert_task_state[1 - 2*!!( 227extern char ___assert_task_state[1 - 2*!!(
227 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 228 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -2139,6 +2140,9 @@ static inline void put_task_struct(struct task_struct *t)
2139 __put_task_struct(t); 2140 __put_task_struct(t);
2140} 2141}
2141 2142
2143struct task_struct *task_rcu_dereference(struct task_struct **ptask);
2144struct task_struct *try_get_task_struct(struct task_struct **ptask);
2145
2142#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2146#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2143extern void task_cputime(struct task_struct *t, 2147extern void task_cputime(struct task_struct *t,
2144 cputime_t *utime, cputime_t *stime); 2148 cputime_t *utime, cputime_t *stime);
@@ -3007,7 +3011,7 @@ static inline int object_is_on_stack(void *obj)
3007 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 3011 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
3008} 3012}
3009 3013
3010extern void thread_info_cache_init(void); 3014extern void thread_stack_cache_init(void);
3011 3015
3012#ifdef CONFIG_DEBUG_STACK_USAGE 3016#ifdef CONFIG_DEBUG_STACK_USAGE
3013static inline unsigned long stack_not_used(struct task_struct *p) 3017static inline unsigned long stack_not_used(struct task_struct *p)
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index dacb5e711994..de1f64318fc4 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -765,6 +765,8 @@ struct sctp_info {
765 __u8 sctpi_s_disable_fragments; 765 __u8 sctpi_s_disable_fragments;
766 __u8 sctpi_s_v4mapped; 766 __u8 sctpi_s_v4mapped;
767 __u8 sctpi_s_frag_interleave; 767 __u8 sctpi_s_frag_interleave;
768 __u32 sctpi_s_type;
769 __u32 __reserved3;
768}; 770};
769 771
770struct sctp_infox { 772struct sctp_infox {
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7973a821ac58..ead97654c4e9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
277 277
278static inline int raw_read_seqcount_latch(seqcount_t *s) 278static inline int raw_read_seqcount_latch(seqcount_t *s)
279{ 279{
280 return lockless_dereference(s)->sequence; 280 int seq = READ_ONCE(s->sequence);
281 /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
282 smp_read_barrier_depends();
283 return seq;
281} 284}
282 285
283/** 286/**
@@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
331 * unsigned seq, idx; 334 * unsigned seq, idx;
332 * 335 *
333 * do { 336 * do {
334 * seq = lockless_dereference(latch)->seq; 337 * seq = raw_read_seqcount_latch(&latch->seq);
335 * 338 *
336 * idx = seq & 0x01; 339 * idx = seq & 0x01;
337 * entry = data_query(latch->data[idx], ...); 340 * entry = data_query(latch->data[idx], ...);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 48ec7651989b..923266cd294a 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -111,6 +111,7 @@ struct uart_8250_port {
111 * if no_console_suspend 111 * if no_console_suspend
112 */ 112 */
113 unsigned char probe; 113 unsigned char probe;
114 struct mctrl_gpios *gpios;
114#define UART_PROBE_RSA (1 << 0) 115#define UART_PROBE_RSA (1 << 0)
115 116
116 /* 117 /*
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a3d7c0d4a03e..2f44e2013654 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -352,9 +352,15 @@ struct earlycon_id {
352extern const struct earlycon_id __earlycon_table[]; 352extern const struct earlycon_id __earlycon_table[];
353extern const struct earlycon_id __earlycon_table_end[]; 353extern const struct earlycon_id __earlycon_table_end[];
354 354
355#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
356#define EARLYCON_USED_OR_UNUSED __used
357#else
358#define EARLYCON_USED_OR_UNUSED __maybe_unused
359#endif
360
355#define OF_EARLYCON_DECLARE(_name, compat, fn) \ 361#define OF_EARLYCON_DECLARE(_name, compat, fn) \
356 static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ 362 static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
357 __used __section(__earlycon_table) \ 363 EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
358 = { .name = __stringify(_name), \ 364 = { .name = __stringify(_name), \
359 .compatible = compat, \ 365 .compatible = compat, \
360 .setup = fn } 366 .setup = fn }
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index d9b436f09925..e0e1597ef9e6 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -156,6 +156,7 @@ struct sfi_device_table_entry {
156#define SFI_DEV_TYPE_UART 2 156#define SFI_DEV_TYPE_UART 2
157#define SFI_DEV_TYPE_HSI 3 157#define SFI_DEV_TYPE_HSI 3
158#define SFI_DEV_TYPE_IPC 4 158#define SFI_DEV_TYPE_IPC 4
159#define SFI_DEV_TYPE_SD 5
159 160
160 u8 host_num; /* attached to host 0, 1...*/ 161 u8 host_num; /* attached to host 0, 1...*/
161 u16 addr; 162 u16 addr;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ee38a4127475..f39b37180c41 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1062} 1062}
1063 1063
1064void __skb_get_hash(struct sk_buff *skb); 1064void __skb_get_hash(struct sk_buff *skb);
1065u32 __skb_get_hash_symmetric(struct sk_buff *skb);
1065u32 skb_get_poff(const struct sk_buff *skb); 1066u32 skb_get_poff(const struct sk_buff *skb);
1066u32 __skb_get_poff(const struct sk_buff *skb, void *data, 1067u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1067 const struct flow_keys *keys, int hlen); 1068 const struct flow_keys *keys, int hlen);
@@ -2870,6 +2871,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
2870} 2871}
2871 2872
2872/** 2873/**
2874 * skb_push_rcsum - push skb and update receive checksum
2875 * @skb: buffer to update
2876 * @len: length of data pulled
2877 *
2878 * This function performs an skb_push on the packet and updates
2879 * the CHECKSUM_COMPLETE checksum. It should be used on
2880 * receive path processing instead of skb_push unless you know
2881 * that the checksum difference is zero (e.g., a valid IP header)
2882 * or you are setting ip_summed to CHECKSUM_NONE.
2883 */
2884static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2885 unsigned int len)
2886{
2887 skb_push(skb, len);
2888 skb_postpush_rcsum(skb, skb->data, len);
2889 return skb->data;
2890}
2891
2892/**
2873 * pskb_trim_rcsum - trim received skb and update checksum 2893 * pskb_trim_rcsum - trim received skb and update checksum
2874 * @skb: buffer to trim 2894 * @skb: buffer to trim
2875 * @len: new length 2895 * @len: new length
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 4018b48f2b3b..a0596ca0e80a 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -36,6 +36,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
36{ 36{
37 switch (sk->sk_family) { 37 switch (sk->sk_family) {
38 case AF_INET: 38 case AF_INET:
39 if (sk->sk_type == SOCK_RAW)
40 return SKNLGRP_NONE;
41
39 switch (sk->sk_protocol) { 42 switch (sk->sk_protocol) {
40 case IPPROTO_TCP: 43 case IPPROTO_TCP:
41 return SKNLGRP_INET_TCP_DESTROY; 44 return SKNLGRP_INET_TCP_DESTROY;
@@ -45,6 +48,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
45 return SKNLGRP_NONE; 48 return SKNLGRP_NONE;
46 } 49 }
47 case AF_INET6: 50 case AF_INET6:
51 if (sk->sk_type == SOCK_RAW)
52 return SKNLGRP_NONE;
53
48 switch (sk->sk_protocol) { 54 switch (sk->sk_protocol) {
49 case IPPROTO_TCP: 55 case IPPROTO_TCP:
50 return SKNLGRP_INET6_TCP_DESTROY; 56 return SKNLGRP_INET6_TCP_DESTROY;
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 8b3ac0d718eb..0d9848de677d 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -6,6 +6,7 @@
6#endif 6#endif
7 7
8#include <asm/processor.h> /* for cpu_relax() */ 8#include <asm/processor.h> /* for cpu_relax() */
9#include <asm/barrier.h>
9 10
10/* 11/*
11 * include/linux/spinlock_up.h - UP-debug version of spinlocks. 12 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
@@ -25,6 +26,11 @@
25#ifdef CONFIG_DEBUG_SPINLOCK 26#ifdef CONFIG_DEBUG_SPINLOCK
26#define arch_spin_is_locked(x) ((x)->slock == 0) 27#define arch_spin_is_locked(x) ((x)->slock == 0)
27 28
29static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
30{
31 smp_cond_load_acquire(&lock->slock, VAL);
32}
33
28static inline void arch_spin_lock(arch_spinlock_t *lock) 34static inline void arch_spin_lock(arch_spinlock_t *lock)
29{ 35{
30 lock->slock = 0; 36 lock->slock = 0;
@@ -67,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
67 73
68#else /* DEBUG_SPINLOCK */ 74#else /* DEBUG_SPINLOCK */
69#define arch_spin_is_locked(lock) ((void)(lock), 0) 75#define arch_spin_is_locked(lock) ((void)(lock), 0)
76#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0)
70/* for sched/core.c and kernel_lock.c: */ 77/* for sched/core.c and kernel_lock.c: */
71# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) 78# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
72# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) 79# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
@@ -79,7 +86,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
79#define arch_read_can_lock(lock) (((void)(lock), 1)) 86#define arch_read_can_lock(lock) (((void)(lock), 1))
80#define arch_write_can_lock(lock) (((void)(lock), 1)) 87#define arch_write_can_lock(lock) (((void)(lock), 1))
81 88
82#define arch_spin_unlock_wait(lock) \
83 do { cpu_relax(); } while (arch_spin_is_locked(lock))
84
85#endif /* __LINUX_SPINLOCK_UP_H */ 89#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 19c659d1c0f8..b6810c92b8bb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -137,8 +137,6 @@ struct rpc_create_args {
137#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) 137#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
138 138
139struct rpc_clnt *rpc_create(struct rpc_create_args *args); 139struct rpc_clnt *rpc_create(struct rpc_create_args *args);
140struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
141 struct rpc_xprt *xprt);
142struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 140struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
143 const struct rpc_program *, u32); 141 const struct rpc_program *, u32);
144struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 142struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index b7dabc4baafd..79ba50856707 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -84,6 +84,7 @@ struct svc_xprt {
84 84
85 struct net *xpt_net; 85 struct net *xpt_net;
86 struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ 86 struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
87 struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
87}; 88};
88 89
89static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 90static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5aa3834619a8..5e3e1b63dbb3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -297,6 +297,7 @@ struct xprt_create {
297 size_t addrlen; 297 size_t addrlen;
298 const char *servername; 298 const char *servername;
299 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ 299 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
300 struct rpc_xprt_switch *bc_xps;
300 unsigned int flags; 301 unsigned int flags;
301}; 302};
302 303
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e45abe7db9a6..ee517bef0db0 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -335,6 +335,8 @@ struct thermal_genl_event {
335 * @get_trend: a pointer to a function that reads the sensor temperature trend. 335 * @get_trend: a pointer to a function that reads the sensor temperature trend.
336 * @set_emul_temp: a pointer to a function that sets sensor emulated 336 * @set_emul_temp: a pointer to a function that sets sensor emulated
337 * temperature. 337 * temperature.
338 * @set_trip_temp: a pointer to a function that sets the trip temperature on
339 * hardware.
338 */ 340 */
339struct thermal_zone_of_device_ops { 341struct thermal_zone_of_device_ops {
340 int (*get_temp)(void *, int *); 342 int (*get_temp)(void *, int *);
diff --git a/include/linux/time.h b/include/linux/time.h
index 297f09f23896..4cea09d94208 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -205,7 +205,20 @@ struct tm {
205 int tm_yday; 205 int tm_yday;
206}; 206};
207 207
208void time_to_tm(time_t totalsecs, int offset, struct tm *result); 208void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
209
210/**
211 * time_to_tm - converts the calendar time to local broken-down time
212 *
213 * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
214 * Coordinated Universal Time (UTC).
215 * @offset offset seconds adding to totalsecs.
216 * @result pointer to struct tm variable to receive broken-down time
217 */
218static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
219{
220 time64_to_tm(totalsecs, offset, result);
221}
209 222
210/** 223/**
211 * timespec_to_ns - Convert timespec to nanoseconds 224 * timespec_to_ns - Convert timespec to nanoseconds
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 37dbacf84849..816b7543f81b 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv,
21 struct timespec64 ts64; 21 struct timespec64 ts64;
22 22
23 if (!tv) 23 if (!tv)
24 return do_sys_settimeofday64(NULL, tz);
25
26 if (!timespec_valid(tv))
24 return -EINVAL; 27 return -EINVAL;
25 28
26 ts64 = timespec_to_timespec64(*tv); 29 ts64 = timespec_to_timespec64(*tv);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 20ac746f3eb3..4419506b564e 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -19,7 +19,6 @@ struct timer_list {
19 void (*function)(unsigned long); 19 void (*function)(unsigned long);
20 unsigned long data; 20 unsigned long data;
21 u32 flags; 21 u32 flags;
22 int slack;
23 22
24#ifdef CONFIG_TIMER_STATS 23#ifdef CONFIG_TIMER_STATS
25 int start_pid; 24 int start_pid;
@@ -58,11 +57,14 @@ struct timer_list {
58 * workqueue locking issues. It's not meant for executing random crap 57 * workqueue locking issues. It's not meant for executing random crap
59 * with interrupts disabled. Abuse is monitored! 58 * with interrupts disabled. Abuse is monitored!
60 */ 59 */
61#define TIMER_CPUMASK 0x0007FFFF 60#define TIMER_CPUMASK 0x0003FFFF
62#define TIMER_MIGRATING 0x00080000 61#define TIMER_MIGRATING 0x00040000
63#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) 62#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
64#define TIMER_DEFERRABLE 0x00100000 63#define TIMER_DEFERRABLE 0x00080000
64#define TIMER_PINNED 0x00100000
65#define TIMER_IRQSAFE 0x00200000 65#define TIMER_IRQSAFE 0x00200000
66#define TIMER_ARRAYSHIFT 22
67#define TIMER_ARRAYMASK 0xFFC00000
66 68
67#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ 69#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
68 .entry = { .next = TIMER_ENTRY_STATIC }, \ 70 .entry = { .next = TIMER_ENTRY_STATIC }, \
@@ -70,7 +72,6 @@ struct timer_list {
70 .expires = (_expires), \ 72 .expires = (_expires), \
71 .data = (_data), \ 73 .data = (_data), \
72 .flags = (_flags), \ 74 .flags = (_flags), \
73 .slack = -1, \
74 __TIMER_LOCKDEP_MAP_INITIALIZER( \ 75 __TIMER_LOCKDEP_MAP_INITIALIZER( \
75 __FILE__ ":" __stringify(__LINE__)) \ 76 __FILE__ ":" __stringify(__LINE__)) \
76 } 77 }
@@ -78,9 +79,15 @@ struct timer_list {
78#define TIMER_INITIALIZER(_function, _expires, _data) \ 79#define TIMER_INITIALIZER(_function, _expires, _data) \
79 __TIMER_INITIALIZER((_function), (_expires), (_data), 0) 80 __TIMER_INITIALIZER((_function), (_expires), (_data), 0)
80 81
82#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \
83 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
84
81#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ 85#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
82 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) 86 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
83 87
88#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \
89 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
90
84#define DEFINE_TIMER(_name, _function, _expires, _data) \ 91#define DEFINE_TIMER(_name, _function, _expires, _data) \
85 struct timer_list _name = \ 92 struct timer_list _name = \
86 TIMER_INITIALIZER(_function, _expires, _data) 93 TIMER_INITIALIZER(_function, _expires, _data)
@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
124 131
125#define init_timer(timer) \ 132#define init_timer(timer) \
126 __init_timer((timer), 0) 133 __init_timer((timer), 0)
134#define init_timer_pinned(timer) \
135 __init_timer((timer), TIMER_PINNED)
127#define init_timer_deferrable(timer) \ 136#define init_timer_deferrable(timer) \
128 __init_timer((timer), TIMER_DEFERRABLE) 137 __init_timer((timer), TIMER_DEFERRABLE)
138#define init_timer_pinned_deferrable(timer) \
139 __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
129#define init_timer_on_stack(timer) \ 140#define init_timer_on_stack(timer) \
130 __init_timer_on_stack((timer), 0) 141 __init_timer_on_stack((timer), 0)
131 142
@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
145 156
146#define setup_timer(timer, fn, data) \ 157#define setup_timer(timer, fn, data) \
147 __setup_timer((timer), (fn), (data), 0) 158 __setup_timer((timer), (fn), (data), 0)
159#define setup_pinned_timer(timer, fn, data) \
160 __setup_timer((timer), (fn), (data), TIMER_PINNED)
148#define setup_deferrable_timer(timer, fn, data) \ 161#define setup_deferrable_timer(timer, fn, data) \
149 __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) 162 __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
163#define setup_pinned_deferrable_timer(timer, fn, data) \
164 __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
150#define setup_timer_on_stack(timer, fn, data) \ 165#define setup_timer_on_stack(timer, fn, data) \
151 __setup_timer_on_stack((timer), (fn), (data), 0) 166 __setup_timer_on_stack((timer), (fn), (data), 0)
167#define setup_pinned_timer_on_stack(timer, fn, data) \
168 __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
152#define setup_deferrable_timer_on_stack(timer, fn, data) \ 169#define setup_deferrable_timer_on_stack(timer, fn, data) \
153 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) 170 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
171#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
172 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
154 173
155/** 174/**
156 * timer_pending - is a timer pending? 175 * timer_pending - is a timer pending?
@@ -171,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu);
171extern int del_timer(struct timer_list * timer); 190extern int del_timer(struct timer_list * timer);
172extern int mod_timer(struct timer_list *timer, unsigned long expires); 191extern int mod_timer(struct timer_list *timer, unsigned long expires);
173extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); 192extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
174extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
175
176extern void set_timer_slack(struct timer_list *time, int slack_hz);
177 193
178#define TIMER_NOT_PINNED 0
179#define TIMER_PINNED 1
180/* 194/*
181 * The jiffies value which is added to now, when there is no timer 195 * The jiffies value which is added to now, when there is no timer
182 * in the timer wheel: 196 * in the timer wheel:
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 7759fc3c622d..6685a73736a2 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -50,6 +50,10 @@
50 do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) 50 do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
51 51
52/* Definitions for online/offline exerciser. */ 52/* Definitions for online/offline exerciser. */
53bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
54 unsigned long *sum_offl, int *min_onl, int *max_onl);
55bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
56 unsigned long *sum_onl, int *min_onl, int *max_onl);
53int torture_onoff_init(long ooholdoff, long oointerval); 57int torture_onoff_init(long ooholdoff, long oointerval);
54void torture_onoff_stats(void); 58void torture_onoff_stats(void);
55bool torture_onoff_failures(void); 59bool torture_onoff_failures(void);
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index 966889a20ea3..e479033bd782 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -180,11 +180,11 @@ struct ehci_regs {
180 * PORTSCx 180 * PORTSCx
181 */ 181 */
182 /* HOSTPC: offset 0x84 */ 182 /* HOSTPC: offset 0x84 */
183 u32 hostpc[1]; /* HOSTPC extension */ 183 u32 hostpc[0]; /* HOSTPC extension */
184#define HOSTPC_PHCD (1<<22) /* Phy clock disable */ 184#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
185#define HOSTPC_PSPD (3<<25) /* Port speed detection */ 185#define HOSTPC_PSPD (3<<25) /* Port speed detection */
186 186
187 u32 reserved5[16]; 187 u32 reserved5[17];
188 188
189 /* USBMODE_EX: offset 0xc8 */ 189 /* USBMODE_EX: offset 0xc8 */
190 u32 usbmode_ex; /* USB Device mode extension */ 190 u32 usbmode_ex; /* USB Device mode extension */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 457651bf45b0..612dbdfa388e 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -25,6 +25,8 @@
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include <linux/usb/ch9.h> 26#include <linux/usb/ch9.h>
27 27
28#define UDC_TRACE_STR_MAX 512
29
28struct usb_ep; 30struct usb_ep;
29 31
30/** 32/**
@@ -228,307 +230,49 @@ struct usb_ep {
228 230
229/*-------------------------------------------------------------------------*/ 231/*-------------------------------------------------------------------------*/
230 232
231/** 233#if IS_ENABLED(CONFIG_USB_GADGET)
232 * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint 234void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit);
233 * @ep:the endpoint being configured 235int usb_ep_enable(struct usb_ep *ep);
234 * @maxpacket_limit:value of maximum packet size limit 236int usb_ep_disable(struct usb_ep *ep);
235 * 237struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
236 * This function should be used only in UDC drivers to initialize endpoint 238void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req);
237 * (usually in probe function). 239int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags);
238 */ 240int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
241int usb_ep_set_halt(struct usb_ep *ep);
242int usb_ep_clear_halt(struct usb_ep *ep);
243int usb_ep_set_wedge(struct usb_ep *ep);
244int usb_ep_fifo_status(struct usb_ep *ep);
245void usb_ep_fifo_flush(struct usb_ep *ep);
246#else
239static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, 247static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
240 unsigned maxpacket_limit) 248 unsigned maxpacket_limit)
241{ 249{ }
242 ep->maxpacket_limit = maxpacket_limit;
243 ep->maxpacket = maxpacket_limit;
244}
245
246/**
247 * usb_ep_enable - configure endpoint, making it usable
248 * @ep:the endpoint being configured. may not be the endpoint named "ep0".
249 * drivers discover endpoints through the ep_list of a usb_gadget.
250 *
251 * When configurations are set, or when interface settings change, the driver
252 * will enable or disable the relevant endpoints. while it is enabled, an
253 * endpoint may be used for i/o until the driver receives a disconnect() from
254 * the host or until the endpoint is disabled.
255 *
256 * the ep0 implementation (which calls this routine) must ensure that the
257 * hardware capabilities of each endpoint match the descriptor provided
258 * for it. for example, an endpoint named "ep2in-bulk" would be usable
259 * for interrupt transfers as well as bulk, but it likely couldn't be used
260 * for iso transfers or for endpoint 14. some endpoints are fully
261 * configurable, with more generic names like "ep-a". (remember that for
262 * USB, "in" means "towards the USB master".)
263 *
264 * returns zero, or a negative error code.
265 */
266static inline int usb_ep_enable(struct usb_ep *ep) 250static inline int usb_ep_enable(struct usb_ep *ep)
267{ 251{ return 0; }
268 int ret;
269
270 if (ep->enabled)
271 return 0;
272
273 ret = ep->ops->enable(ep, ep->desc);
274 if (ret)
275 return ret;
276
277 ep->enabled = true;
278
279 return 0;
280}
281
282/**
283 * usb_ep_disable - endpoint is no longer usable
284 * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
285 *
286 * no other task may be using this endpoint when this is called.
287 * any pending and uncompleted requests will complete with status
288 * indicating disconnect (-ESHUTDOWN) before this call returns.
289 * gadget drivers must call usb_ep_enable() again before queueing
290 * requests to the endpoint.
291 *
292 * returns zero, or a negative error code.
293 */
294static inline int usb_ep_disable(struct usb_ep *ep) 252static inline int usb_ep_disable(struct usb_ep *ep)
295{ 253{ return 0; }
296 int ret;
297
298 if (!ep->enabled)
299 return 0;
300
301 ret = ep->ops->disable(ep);
302 if (ret)
303 return ret;
304
305 ep->enabled = false;
306
307 return 0;
308}
309
310/**
311 * usb_ep_alloc_request - allocate a request object to use with this endpoint
312 * @ep:the endpoint to be used with with the request
313 * @gfp_flags:GFP_* flags to use
314 *
315 * Request objects must be allocated with this call, since they normally
316 * need controller-specific setup and may even need endpoint-specific
317 * resources such as allocation of DMA descriptors.
318 * Requests may be submitted with usb_ep_queue(), and receive a single
319 * completion callback. Free requests with usb_ep_free_request(), when
320 * they are no longer needed.
321 *
322 * Returns the request, or null if one could not be allocated.
323 */
324static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, 254static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
325 gfp_t gfp_flags) 255 gfp_t gfp_flags)
326{ 256{ return NULL; }
327 return ep->ops->alloc_request(ep, gfp_flags);
328}
329
330/**
331 * usb_ep_free_request - frees a request object
332 * @ep:the endpoint associated with the request
333 * @req:the request being freed
334 *
335 * Reverses the effect of usb_ep_alloc_request().
336 * Caller guarantees the request is not queued, and that it will
337 * no longer be requeued (or otherwise used).
338 */
339static inline void usb_ep_free_request(struct usb_ep *ep, 257static inline void usb_ep_free_request(struct usb_ep *ep,
340 struct usb_request *req) 258 struct usb_request *req)
341{ 259{ }
342 ep->ops->free_request(ep, req); 260static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req,
343} 261 gfp_t gfp_flags)
344 262{ return 0; }
345/**
346 * usb_ep_queue - queues (submits) an I/O request to an endpoint.
347 * @ep:the endpoint associated with the request
348 * @req:the request being submitted
349 * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
350 * pre-allocate all necessary memory with the request.
351 *
352 * This tells the device controller to perform the specified request through
353 * that endpoint (reading or writing a buffer). When the request completes,
354 * including being canceled by usb_ep_dequeue(), the request's completion
355 * routine is called to return the request to the driver. Any endpoint
356 * (except control endpoints like ep0) may have more than one transfer
357 * request queued; they complete in FIFO order. Once a gadget driver
358 * submits a request, that request may not be examined or modified until it
359 * is given back to that driver through the completion callback.
360 *
361 * Each request is turned into one or more packets. The controller driver
362 * never merges adjacent requests into the same packet. OUT transfers
363 * will sometimes use data that's already buffered in the hardware.
364 * Drivers can rely on the fact that the first byte of the request's buffer
365 * always corresponds to the first byte of some USB packet, for both
366 * IN and OUT transfers.
367 *
368 * Bulk endpoints can queue any amount of data; the transfer is packetized
369 * automatically. The last packet will be short if the request doesn't fill it
370 * out completely. Zero length packets (ZLPs) should be avoided in portable
371 * protocols since not all usb hardware can successfully handle zero length
372 * packets. (ZLPs may be explicitly written, and may be implicitly written if
373 * the request 'zero' flag is set.) Bulk endpoints may also be used
374 * for interrupt transfers; but the reverse is not true, and some endpoints
375 * won't support every interrupt transfer. (Such as 768 byte packets.)
376 *
377 * Interrupt-only endpoints are less functional than bulk endpoints, for
378 * example by not supporting queueing or not handling buffers that are
379 * larger than the endpoint's maxpacket size. They may also treat data
380 * toggle differently.
381 *
382 * Control endpoints ... after getting a setup() callback, the driver queues
383 * one response (even if it would be zero length). That enables the
384 * status ack, after transferring data as specified in the response. Setup
385 * functions may return negative error codes to generate protocol stalls.
386 * (Note that some USB device controllers disallow protocol stall responses
387 * in some cases.) When control responses are deferred (the response is
388 * written after the setup callback returns), then usb_ep_set_halt() may be
389 * used on ep0 to trigger protocol stalls. Depending on the controller,
390 * it may not be possible to trigger a status-stage protocol stall when the
391 * data stage is over, that is, from within the response's completion
392 * routine.
393 *
394 * For periodic endpoints, like interrupt or isochronous ones, the usb host
395 * arranges to poll once per interval, and the gadget driver usually will
396 * have queued some data to transfer at that time.
397 *
398 * Returns zero, or a negative error code. Endpoints that are not enabled
399 * report errors; errors will also be
400 * reported when the usb peripheral is disconnected.
401 */
402static inline int usb_ep_queue(struct usb_ep *ep,
403 struct usb_request *req, gfp_t gfp_flags)
404{
405 if (WARN_ON_ONCE(!ep->enabled && ep->address))
406 return -ESHUTDOWN;
407
408 return ep->ops->queue(ep, req, gfp_flags);
409}
410
411/**
412 * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
413 * @ep:the endpoint associated with the request
414 * @req:the request being canceled
415 *
416 * If the request is still active on the endpoint, it is dequeued and its
417 * completion routine is called (with status -ECONNRESET); else a negative
418 * error code is returned. This is guaranteed to happen before the call to
419 * usb_ep_dequeue() returns.
420 *
421 * Note that some hardware can't clear out write fifos (to unlink the request
422 * at the head of the queue) except as part of disconnecting from usb. Such
423 * restrictions prevent drivers from supporting configuration changes,
424 * even to configuration zero (a "chapter 9" requirement).
425 */
426static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) 263static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
427{ 264{ return 0; }
428 return ep->ops->dequeue(ep, req);
429}
430
431/**
432 * usb_ep_set_halt - sets the endpoint halt feature.
433 * @ep: the non-isochronous endpoint being stalled
434 *
435 * Use this to stall an endpoint, perhaps as an error report.
436 * Except for control endpoints,
437 * the endpoint stays halted (will not stream any data) until the host
438 * clears this feature; drivers may need to empty the endpoint's request
439 * queue first, to make sure no inappropriate transfers happen.
440 *
441 * Note that while an endpoint CLEAR_FEATURE will be invisible to the
442 * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
443 * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
444 * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
445 *
446 * Returns zero, or a negative error code. On success, this call sets
447 * underlying hardware state that blocks data transfers.
448 * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
449 * transfer requests are still queued, or if the controller hardware
450 * (usually a FIFO) still holds bytes that the host hasn't collected.
451 */
452static inline int usb_ep_set_halt(struct usb_ep *ep) 265static inline int usb_ep_set_halt(struct usb_ep *ep)
453{ 266{ return 0; }
454 return ep->ops->set_halt(ep, 1);
455}
456
457/**
458 * usb_ep_clear_halt - clears endpoint halt, and resets toggle
459 * @ep:the bulk or interrupt endpoint being reset
460 *
461 * Use this when responding to the standard usb "set interface" request,
462 * for endpoints that aren't reconfigured, after clearing any other state
463 * in the endpoint's i/o queue.
464 *
465 * Returns zero, or a negative error code. On success, this call clears
466 * the underlying hardware state reflecting endpoint halt and data toggle.
467 * Note that some hardware can't support this request (like pxa2xx_udc),
468 * and accordingly can't correctly implement interface altsettings.
469 */
470static inline int usb_ep_clear_halt(struct usb_ep *ep) 267static inline int usb_ep_clear_halt(struct usb_ep *ep)
471{ 268{ return 0; }
472 return ep->ops->set_halt(ep, 0); 269static inline int usb_ep_set_wedge(struct usb_ep *ep)
473} 270{ return 0; }
474
475/**
476 * usb_ep_set_wedge - sets the halt feature and ignores clear requests
477 * @ep: the endpoint being wedged
478 *
479 * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
480 * requests. If the gadget driver clears the halt status, it will
481 * automatically unwedge the endpoint.
482 *
483 * Returns zero on success, else negative errno.
484 */
485static inline int
486usb_ep_set_wedge(struct usb_ep *ep)
487{
488 if (ep->ops->set_wedge)
489 return ep->ops->set_wedge(ep);
490 else
491 return ep->ops->set_halt(ep, 1);
492}
493
494/**
495 * usb_ep_fifo_status - returns number of bytes in fifo, or error
496 * @ep: the endpoint whose fifo status is being checked.
497 *
498 * FIFO endpoints may have "unclaimed data" in them in certain cases,
499 * such as after aborted transfers. Hosts may not have collected all
500 * the IN data written by the gadget driver (and reported by a request
501 * completion). The gadget driver may not have collected all the data
502 * written OUT to it by the host. Drivers that need precise handling for
503 * fault reporting or recovery may need to use this call.
504 *
505 * This returns the number of such bytes in the fifo, or a negative
506 * errno if the endpoint doesn't use a FIFO or doesn't support such
507 * precise handling.
508 */
509static inline int usb_ep_fifo_status(struct usb_ep *ep) 271static inline int usb_ep_fifo_status(struct usb_ep *ep)
510{ 272{ return 0; }
511 if (ep->ops->fifo_status)
512 return ep->ops->fifo_status(ep);
513 else
514 return -EOPNOTSUPP;
515}
516
517/**
518 * usb_ep_fifo_flush - flushes contents of a fifo
519 * @ep: the endpoint whose fifo is being flushed.
520 *
521 * This call may be used to flush the "unclaimed data" that may exist in
522 * an endpoint fifo after abnormal transaction terminations. The call
523 * must never be used except when endpoint is not being used for any
524 * protocol translation.
525 */
526static inline void usb_ep_fifo_flush(struct usb_ep *ep) 273static inline void usb_ep_fifo_flush(struct usb_ep *ep)
527{ 274{ }
528 if (ep->ops->fifo_flush) 275#endif /* USB_GADGET */
529 ep->ops->fifo_flush(ep);
530}
531
532 276
533/*-------------------------------------------------------------------------*/ 277/*-------------------------------------------------------------------------*/
534 278
@@ -582,6 +326,7 @@ struct usb_gadget_ops {
582 * @dev: Driver model state for this abstract device. 326 * @dev: Driver model state for this abstract device.
583 * @out_epnum: last used out ep number 327 * @out_epnum: last used out ep number
584 * @in_epnum: last used in ep number 328 * @in_epnum: last used in ep number
329 * @mA: last set mA value
585 * @otg_caps: OTG capabilities of this gadget. 330 * @otg_caps: OTG capabilities of this gadget.
586 * @sg_supported: true if we can handle scatter-gather 331 * @sg_supported: true if we can handle scatter-gather
587 * @is_otg: True if the USB device port uses a Mini-AB jack, so that the 332 * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
@@ -638,6 +383,7 @@ struct usb_gadget {
638 struct device dev; 383 struct device dev;
639 unsigned out_epnum; 384 unsigned out_epnum;
640 unsigned in_epnum; 385 unsigned in_epnum;
386 unsigned mA;
641 struct usb_otg_caps *otg_caps; 387 struct usb_otg_caps *otg_caps;
642 388
643 unsigned sg_supported:1; 389 unsigned sg_supported:1;
@@ -760,251 +506,44 @@ static inline int gadget_is_otg(struct usb_gadget *g)
760#endif 506#endif
761} 507}
762 508
763/** 509/*-------------------------------------------------------------------------*/
764 * usb_gadget_frame_number - returns the current frame number
765 * @gadget: controller that reports the frame number
766 *
767 * Returns the usb frame number, normally eleven bits from a SOF packet,
768 * or negative errno if this device doesn't support this capability.
769 */
770static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
771{
772 return gadget->ops->get_frame(gadget);
773}
774 510
775/** 511#if IS_ENABLED(CONFIG_USB_GADGET)
776 * usb_gadget_wakeup - tries to wake up the host connected to this gadget 512int usb_gadget_frame_number(struct usb_gadget *gadget);
777 * @gadget: controller used to wake up the host 513int usb_gadget_wakeup(struct usb_gadget *gadget);
778 * 514int usb_gadget_set_selfpowered(struct usb_gadget *gadget);
779 * Returns zero on success, else negative error code if the hardware 515int usb_gadget_clear_selfpowered(struct usb_gadget *gadget);
780 * doesn't support such attempts, or its support has not been enabled 516int usb_gadget_vbus_connect(struct usb_gadget *gadget);
781 * by the usb host. Drivers must return device descriptors that report 517int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA);
782 * their ability to support this, or hosts won't enable it. 518int usb_gadget_vbus_disconnect(struct usb_gadget *gadget);
783 * 519int usb_gadget_connect(struct usb_gadget *gadget);
784 * This may also try to use SRP to wake the host and start enumeration, 520int usb_gadget_disconnect(struct usb_gadget *gadget);
785 * even if OTG isn't otherwise in use. OTG devices may also start 521int usb_gadget_deactivate(struct usb_gadget *gadget);
786 * remote wakeup even when hosts don't explicitly enable it. 522int usb_gadget_activate(struct usb_gadget *gadget);
787 */ 523#else
524static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
525{ return 0; }
788static inline int usb_gadget_wakeup(struct usb_gadget *gadget) 526static inline int usb_gadget_wakeup(struct usb_gadget *gadget)
789{ 527{ return 0; }
790 if (!gadget->ops->wakeup)
791 return -EOPNOTSUPP;
792 return gadget->ops->wakeup(gadget);
793}
794
795/**
796 * usb_gadget_set_selfpowered - sets the device selfpowered feature.
797 * @gadget:the device being declared as self-powered
798 *
799 * this affects the device status reported by the hardware driver
800 * to reflect that it now has a local power supply.
801 *
802 * returns zero on success, else negative errno.
803 */
804static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) 528static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
805{ 529{ return 0; }
806 if (!gadget->ops->set_selfpowered)
807 return -EOPNOTSUPP;
808 return gadget->ops->set_selfpowered(gadget, 1);
809}
810
811/**
812 * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
813 * @gadget:the device being declared as bus-powered
814 *
815 * this affects the device status reported by the hardware driver.
816 * some hardware may not support bus-powered operation, in which
817 * case this feature's value can never change.
818 *
819 * returns zero on success, else negative errno.
820 */
821static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) 530static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
822{ 531{ return 0; }
823 if (!gadget->ops->set_selfpowered)
824 return -EOPNOTSUPP;
825 return gadget->ops->set_selfpowered(gadget, 0);
826}
827
828/**
829 * usb_gadget_vbus_connect - Notify controller that VBUS is powered
830 * @gadget:The device which now has VBUS power.
831 * Context: can sleep
832 *
833 * This call is used by a driver for an external transceiver (or GPIO)
834 * that detects a VBUS power session starting. Common responses include
835 * resuming the controller, activating the D+ (or D-) pullup to let the
836 * host detect that a USB device is attached, and starting to draw power
837 * (8mA or possibly more, especially after SET_CONFIGURATION).
838 *
839 * Returns zero on success, else negative errno.
840 */
841static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget) 532static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget)
842{ 533{ return 0; }
843 if (!gadget->ops->vbus_session)
844 return -EOPNOTSUPP;
845 return gadget->ops->vbus_session(gadget, 1);
846}
847
848/**
849 * usb_gadget_vbus_draw - constrain controller's VBUS power usage
850 * @gadget:The device whose VBUS usage is being described
851 * @mA:How much current to draw, in milliAmperes. This should be twice
852 * the value listed in the configuration descriptor bMaxPower field.
853 *
854 * This call is used by gadget drivers during SET_CONFIGURATION calls,
855 * reporting how much power the device may consume. For example, this
856 * could affect how quickly batteries are recharged.
857 *
858 * Returns zero on success, else negative errno.
859 */
860static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) 534static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
861{ 535{ return 0; }
862 if (!gadget->ops->vbus_draw)
863 return -EOPNOTSUPP;
864 return gadget->ops->vbus_draw(gadget, mA);
865}
866
867/**
868 * usb_gadget_vbus_disconnect - notify controller about VBUS session end
869 * @gadget:the device whose VBUS supply is being described
870 * Context: can sleep
871 *
872 * This call is used by a driver for an external transceiver (or GPIO)
873 * that detects a VBUS power session ending. Common responses include
874 * reversing everything done in usb_gadget_vbus_connect().
875 *
876 * Returns zero on success, else negative errno.
877 */
878static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) 536static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
879{ 537{ return 0; }
880 if (!gadget->ops->vbus_session)
881 return -EOPNOTSUPP;
882 return gadget->ops->vbus_session(gadget, 0);
883}
884
885/**
886 * usb_gadget_connect - software-controlled connect to USB host
887 * @gadget:the peripheral being connected
888 *
889 * Enables the D+ (or potentially D-) pullup. The host will start
890 * enumerating this gadget when the pullup is active and a VBUS session
891 * is active (the link is powered). This pullup is always enabled unless
892 * usb_gadget_disconnect() has been used to disable it.
893 *
894 * Returns zero on success, else negative errno.
895 */
896static inline int usb_gadget_connect(struct usb_gadget *gadget) 538static inline int usb_gadget_connect(struct usb_gadget *gadget)
897{ 539{ return 0; }
898 int ret;
899
900 if (!gadget->ops->pullup)
901 return -EOPNOTSUPP;
902
903 if (gadget->deactivated) {
904 /*
905 * If gadget is deactivated we only save new state.
906 * Gadget will be connected automatically after activation.
907 */
908 gadget->connected = true;
909 return 0;
910 }
911
912 ret = gadget->ops->pullup(gadget, 1);
913 if (!ret)
914 gadget->connected = 1;
915 return ret;
916}
917
918/**
919 * usb_gadget_disconnect - software-controlled disconnect from USB host
920 * @gadget:the peripheral being disconnected
921 *
922 * Disables the D+ (or potentially D-) pullup, which the host may see
923 * as a disconnect (when a VBUS session is active). Not all systems
924 * support software pullup controls.
925 *
926 * Returns zero on success, else negative errno.
927 */
928static inline int usb_gadget_disconnect(struct usb_gadget *gadget) 540static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
929{ 541{ return 0; }
930 int ret;
931
932 if (!gadget->ops->pullup)
933 return -EOPNOTSUPP;
934
935 if (gadget->deactivated) {
936 /*
937 * If gadget is deactivated we only save new state.
938 * Gadget will stay disconnected after activation.
939 */
940 gadget->connected = false;
941 return 0;
942 }
943
944 ret = gadget->ops->pullup(gadget, 0);
945 if (!ret)
946 gadget->connected = 0;
947 return ret;
948}
949
950/**
951 * usb_gadget_deactivate - deactivate function which is not ready to work
952 * @gadget: the peripheral being deactivated
953 *
954 * This routine may be used during the gadget driver bind() call to prevent
955 * the peripheral from ever being visible to the USB host, unless later
956 * usb_gadget_activate() is called. For example, user mode components may
957 * need to be activated before the system can talk to hosts.
958 *
959 * Returns zero on success, else negative errno.
960 */
961static inline int usb_gadget_deactivate(struct usb_gadget *gadget) 542static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
962{ 543{ return 0; }
963 int ret;
964
965 if (gadget->deactivated)
966 return 0;
967
968 if (gadget->connected) {
969 ret = usb_gadget_disconnect(gadget);
970 if (ret)
971 return ret;
972 /*
973 * If gadget was being connected before deactivation, we want
974 * to reconnect it in usb_gadget_activate().
975 */
976 gadget->connected = true;
977 }
978 gadget->deactivated = true;
979
980 return 0;
981}
982
983/**
984 * usb_gadget_activate - activate function which is not ready to work
985 * @gadget: the peripheral being activated
986 *
987 * This routine activates gadget which was previously deactivated with
988 * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
989 *
990 * Returns zero on success, else negative errno.
991 */
992static inline int usb_gadget_activate(struct usb_gadget *gadget) 544static inline int usb_gadget_activate(struct usb_gadget *gadget)
993{ 545{ return 0; }
994 if (!gadget->deactivated) 546#endif /* CONFIG_USB_GADGET */
995 return 0;
996
997 gadget->deactivated = false;
998
999 /*
1000 * If gadget has been connected before deactivation, or became connected
1001 * while it was being deactivated, we call usb_gadget_connect().
1002 */
1003 if (gadget->connected)
1004 return usb_gadget_connect(gadget);
1005
1006 return 0;
1007}
1008 547
1009/*-------------------------------------------------------------------------*/ 548/*-------------------------------------------------------------------------*/
1010 549
@@ -1034,6 +573,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
1034 * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, 573 * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
1035 * this driver will be bound to any available UDC. 574 * this driver will be bound to any available UDC.
1036 * @pending: UDC core private data used for deferred probe of this driver. 575 * @pending: UDC core private data used for deferred probe of this driver.
576 * @match_existing_only: If udc is not found, return an error and don't add this
577 * gadget driver to list of pending driver
1037 * 578 *
1038 * Devices are disabled till a gadget driver successfully bind()s, which 579 * Devices are disabled till a gadget driver successfully bind()s, which
1039 * means the driver will handle setup() requests needed to enumerate (and 580 * means the driver will handle setup() requests needed to enumerate (and
@@ -1097,6 +638,7 @@ struct usb_gadget_driver {
1097 638
1098 char *udc_name; 639 char *udc_name;
1099 struct list_head pending; 640 struct list_head pending;
641 unsigned match_existing_only:1;
1100}; 642};
1101 643
1102 644
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
deleted file mode 100644
index 8c8f6854c993..000000000000
--- a/include/linux/usb/msm_hsusb.h
+++ /dev/null
@@ -1,200 +0,0 @@
1/* linux/include/asm-arm/arch-msm/hsusb.h
2 *
3 * Copyright (C) 2008 Google, Inc.
4 * Author: Brian Swetland <swetland@google.com>
5 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef __ASM_ARCH_MSM_HSUSB_H
19#define __ASM_ARCH_MSM_HSUSB_H
20
21#include <linux/extcon.h>
22#include <linux/types.h>
23#include <linux/usb/otg.h>
24#include <linux/clk.h>
25
26/**
27 * OTG control
28 *
29 * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host
30 * only configuration.
31 * OTG_PHY_CONTROL Id/VBUS notifications comes form USB PHY.
32 * OTG_PMIC_CONTROL Id/VBUS notifications comes from PMIC hardware.
33 * OTG_USER_CONTROL Id/VBUS notifcations comes from User via sysfs.
34 *
35 */
36enum otg_control_type {
37 OTG_NO_CONTROL = 0,
38 OTG_PHY_CONTROL,
39 OTG_PMIC_CONTROL,
40 OTG_USER_CONTROL,
41};
42
43/**
44 * PHY used in
45 *
46 * INVALID_PHY Unsupported PHY
47 * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
48 * SNPS_28NM_INTEGRATED_PHY Synopsis 28nm integrated PHY
49 *
50 */
51enum msm_usb_phy_type {
52 INVALID_PHY = 0,
53 CI_45NM_INTEGRATED_PHY,
54 SNPS_28NM_INTEGRATED_PHY,
55};
56
57#define IDEV_CHG_MAX 1500
58#define IUNIT 100
59
60/**
61 * Different states involved in USB charger detection.
62 *
63 * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
64 * process is not yet started.
65 * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
66 * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
67 * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
68 * between SDP and DCP/CDP).
69 * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
70 * between DCP and CDP).
71 * USB_CHG_STATE_DETECTED USB charger type is determined.
72 *
73 */
74enum usb_chg_state {
75 USB_CHG_STATE_UNDEFINED = 0,
76 USB_CHG_STATE_WAIT_FOR_DCD,
77 USB_CHG_STATE_DCD_DONE,
78 USB_CHG_STATE_PRIMARY_DONE,
79 USB_CHG_STATE_SECONDARY_DONE,
80 USB_CHG_STATE_DETECTED,
81};
82
83/**
84 * USB charger types
85 *
86 * USB_INVALID_CHARGER Invalid USB charger.
87 * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
88 * on USB2.0 compliant host/hub.
89 * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
90 * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
91 * IDEV_CHG_MAX can be drawn irrespective of USB state.
92 *
93 */
94enum usb_chg_type {
95 USB_INVALID_CHARGER = 0,
96 USB_SDP_CHARGER,
97 USB_DCP_CHARGER,
98 USB_CDP_CHARGER,
99};
100
101/**
102 * struct msm_otg_platform_data - platform device data
103 * for msm_otg driver.
104 * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
105 * "do not overwrite default vaule at this address".
106 * @phy_init_sz: PHY configuration sequence size.
107 * @vbus_power: VBUS power on/off routine.
108 * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
109 * @mode: Supported mode (OTG/peripheral/host).
110 * @otg_control: OTG switch controlled by user/Id pin
111 */
112struct msm_otg_platform_data {
113 int *phy_init_seq;
114 int phy_init_sz;
115 void (*vbus_power)(bool on);
116 unsigned power_budget;
117 enum usb_dr_mode mode;
118 enum otg_control_type otg_control;
119 enum msm_usb_phy_type phy_type;
120 void (*setup_gpio)(enum usb_otg_state state);
121};
122
123/**
124 * struct msm_usb_cable - structure for exteternal connector cable
125 * state tracking
126 * @nb: hold event notification callback
127 * @conn: used for notification registration
128 */
129struct msm_usb_cable {
130 struct notifier_block nb;
131 struct extcon_dev *extcon;
132};
133
134/**
135 * struct msm_otg: OTG driver data. Shared by HCD and DCD.
136 * @otg: USB OTG Transceiver structure.
137 * @pdata: otg device platform data.
138 * @irq: IRQ number assigned for HSUSB controller.
139 * @clk: clock struct of usb_hs_clk.
140 * @pclk: clock struct of usb_hs_pclk.
141 * @core_clk: clock struct of usb_hs_core_clk.
142 * @regs: ioremapped register base address.
143 * @inputs: OTG state machine inputs(Id, SessValid etc).
144 * @sm_work: OTG state machine work.
145 * @in_lpm: indicates low power mode (LPM) state.
146 * @async_int: Async interrupt arrived.
147 * @cur_power: The amount of mA available from downstream port.
148 * @chg_work: Charger detection work.
149 * @chg_state: The state of charger detection process.
150 * @chg_type: The type of charger attached.
151 * @dcd_retires: The retry count used to track Data contact
152 * detection process.
153 * @manual_pullup: true if VBUS is not routed to USB controller/phy
154 * and controller driver therefore enables pull-up explicitly before
155 * starting controller using usbcmd run/stop bit.
156 * @vbus: VBUS signal state trakining, using extcon framework
157 * @id: ID signal state trakining, using extcon framework
158 * @switch_gpio: Descriptor for GPIO used to control external Dual
159 * SPDT USB Switch.
160 * @reboot: Used to inform the driver to route USB D+/D- line to Device
161 * connector
162 */
163struct msm_otg {
164 struct usb_phy phy;
165 struct msm_otg_platform_data *pdata;
166 int irq;
167 struct clk *clk;
168 struct clk *pclk;
169 struct clk *core_clk;
170 void __iomem *regs;
171#define ID 0
172#define B_SESS_VLD 1
173 unsigned long inputs;
174 struct work_struct sm_work;
175 atomic_t in_lpm;
176 int async_int;
177 unsigned cur_power;
178 int phy_number;
179 struct delayed_work chg_work;
180 enum usb_chg_state chg_state;
181 enum usb_chg_type chg_type;
182 u8 dcd_retries;
183 struct regulator *v3p3;
184 struct regulator *v1p8;
185 struct regulator *vddcx;
186
187 struct reset_control *phy_rst;
188 struct reset_control *link_rst;
189 int vdd_levels[3];
190
191 bool manual_pullup;
192
193 struct msm_usb_cable vbus;
194 struct msm_usb_cable id;
195
196 struct gpio_desc *switch_gpio;
197 struct notifier_block reboot;
198};
199
200#endif
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 0b3da40a525e..d315c8907869 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -142,10 +142,11 @@ enum musb_vbus_id_status {
142}; 142};
143 143
144#if IS_ENABLED(CONFIG_USB_MUSB_HDRC) 144#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
145void musb_mailbox(enum musb_vbus_id_status status); 145int musb_mailbox(enum musb_vbus_id_status status);
146#else 146#else
147static inline void musb_mailbox(enum musb_vbus_id_status status) 147static inline int musb_mailbox(enum musb_vbus_id_status status)
148{ 148{
149 return 0;
149} 150}
150#endif 151#endif
151 152
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index de3237fce6b2..5ff9032ee1b4 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -12,7 +12,7 @@
12#include <linux/usb/phy.h> 12#include <linux/usb/phy.h>
13 13
14#if IS_ENABLED(CONFIG_OF) 14#if IS_ENABLED(CONFIG_OF)
15enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np); 15enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
16bool of_usb_host_tpl_support(struct device_node *np); 16bool of_usb_host_tpl_support(struct device_node *np);
17int of_usb_update_otg_caps(struct device_node *np, 17int of_usb_update_otg_caps(struct device_node *np,
18 struct usb_otg_caps *otg_caps); 18 struct usb_otg_caps *otg_caps);
@@ -20,7 +20,7 @@ struct device_node *usb_of_get_child_node(struct device_node *parent,
20 int portnum); 20 int portnum);
21#else 21#else
22static inline enum usb_dr_mode 22static inline enum usb_dr_mode
23of_usb_get_dr_mode_by_phy(struct device_node *phy_np) 23of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
24{ 24{
25 return USB_DR_MODE_UNKNOWN; 25 return USB_DR_MODE_UNKNOWN;
26} 26}
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
deleted file mode 100644
index 376654b5b0f7..000000000000
--- a/include/linux/usb/xhci_pdriver.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful, but
7 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
8 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
9 * for more details.
10 *
11 */
12
13#ifndef __USB_CORE_XHCI_PDRIVER_H
14#define __USB_CORE_XHCI_PDRIVER_H
15
16/**
17 * struct usb_xhci_pdata - platform_data for generic xhci platform driver
18 *
19 * @usb3_lpm_capable: determines if this xhci platform supports USB3
20 * LPM capability
21 *
22 */
23struct usb_xhci_pdata {
24 unsigned usb3_lpm_capable:1;
25};
26
27#endif /* __USB_CORE_XHCI_PDRIVER_H */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 8d7634247fb4..6abd24f258bc 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -45,7 +45,7 @@ void poke_blanked_console(void);
45int con_font_op(struct vc_data *vc, struct console_font_op *op); 45int con_font_op(struct vc_data *vc, struct console_font_op *op);
46int con_set_cmap(unsigned char __user *cmap); 46int con_set_cmap(unsigned char __user *cmap);
47int con_get_cmap(unsigned char __user *cmap); 47int con_get_cmap(unsigned char __user *cmap);
48void scrollback(struct vc_data *vc, int lines); 48void scrollback(struct vc_data *vc);
49void scrollfront(struct vc_data *vc, int lines); 49void scrollfront(struct vc_data *vc, int lines);
50void clear_buffer_attributes(struct vc_data *vc); 50void clear_buffer_attributes(struct vc_data *vc);
51void update_region(struct vc_data *vc, unsigned long start, int count); 51void update_region(struct vc_data *vc, unsigned long start, int count);
@@ -59,14 +59,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg);
59#ifdef CONFIG_CONSOLE_TRANSLATIONS 59#ifdef CONFIG_CONSOLE_TRANSLATIONS
60/* consolemap.c */ 60/* consolemap.c */
61 61
62struct unimapinit;
63struct unipair; 62struct unipair;
64 63
65int con_set_trans_old(unsigned char __user * table); 64int con_set_trans_old(unsigned char __user * table);
66int con_get_trans_old(unsigned char __user * table); 65int con_get_trans_old(unsigned char __user * table);
67int con_set_trans_new(unsigned short __user * table); 66int con_set_trans_new(unsigned short __user * table);
68int con_get_trans_new(unsigned short __user * table); 67int con_get_trans_new(unsigned short __user * table);
69int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui); 68int con_clear_unimap(struct vc_data *vc);
70int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list); 69int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list);
71int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list); 70int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list);
72int con_set_default_unimap(struct vc_data *vc); 71int con_set_default_unimap(struct vc_data *vc);
@@ -92,7 +91,7 @@ static inline int con_get_trans_new(unsigned short __user *table)
92{ 91{
93 return -EINVAL; 92 return -EINVAL;
94} 93}
95static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui) 94static inline int con_clear_unimap(struct vc_data *vc)
96{ 95{
97 return 0; 96 return 0;
98} 97}
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index fa2196990f84..aa9bfea8804a 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -12,11 +12,9 @@ struct task_struct;
12/* 12/*
13 * vtime_accounting_cpu_enabled() definitions/declarations 13 * vtime_accounting_cpu_enabled() definitions/declarations
14 */ 14 */
15#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 15#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
16static inline bool vtime_accounting_cpu_enabled(void) { return true; } 16static inline bool vtime_accounting_cpu_enabled(void) { return true; }
17#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ 17#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
18
19#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
20/* 18/*
21 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful 19 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
22 * in that case and compute the tickless cputime. 20 * in that case and compute the tickless cputime.
@@ -37,11 +35,9 @@ static inline bool vtime_accounting_cpu_enabled(void)
37 35
38 return false; 36 return false;
39} 37}
40#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ 38#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
41
42#ifndef CONFIG_VIRT_CPU_ACCOUNTING
43static inline bool vtime_accounting_cpu_enabled(void) { return false; } 39static inline bool vtime_accounting_cpu_enabled(void) { return false; }
44#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ 40#endif
45 41
46 42
47/* 43/*
@@ -64,35 +60,15 @@ extern void vtime_account_system(struct task_struct *tsk);
64extern void vtime_account_idle(struct task_struct *tsk); 60extern void vtime_account_idle(struct task_struct *tsk);
65extern void vtime_account_user(struct task_struct *tsk); 61extern void vtime_account_user(struct task_struct *tsk);
66 62
67#ifdef __ARCH_HAS_VTIME_ACCOUNT
68extern void vtime_account_irq_enter(struct task_struct *tsk);
69#else
70extern void vtime_common_account_irq_enter(struct task_struct *tsk);
71static inline void vtime_account_irq_enter(struct task_struct *tsk)
72{
73 if (vtime_accounting_cpu_enabled())
74 vtime_common_account_irq_enter(tsk);
75}
76#endif /* __ARCH_HAS_VTIME_ACCOUNT */
77
78#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ 63#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
79 64
80static inline void vtime_task_switch(struct task_struct *prev) { } 65static inline void vtime_task_switch(struct task_struct *prev) { }
81static inline void vtime_account_system(struct task_struct *tsk) { } 66static inline void vtime_account_system(struct task_struct *tsk) { }
82static inline void vtime_account_user(struct task_struct *tsk) { } 67static inline void vtime_account_user(struct task_struct *tsk) { }
83static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
84#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ 68#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
85 69
86#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 70#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
87extern void arch_vtime_task_switch(struct task_struct *tsk); 71extern void arch_vtime_task_switch(struct task_struct *tsk);
88extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
89
90static inline void vtime_account_irq_exit(struct task_struct *tsk)
91{
92 if (vtime_accounting_cpu_enabled())
93 vtime_gen_account_irq_exit(tsk);
94}
95
96extern void vtime_user_enter(struct task_struct *tsk); 72extern void vtime_user_enter(struct task_struct *tsk);
97 73
98static inline void vtime_user_exit(struct task_struct *tsk) 74static inline void vtime_user_exit(struct task_struct *tsk)
@@ -103,11 +79,6 @@ extern void vtime_guest_enter(struct task_struct *tsk);
103extern void vtime_guest_exit(struct task_struct *tsk); 79extern void vtime_guest_exit(struct task_struct *tsk);
104extern void vtime_init_idle(struct task_struct *tsk, int cpu); 80extern void vtime_init_idle(struct task_struct *tsk, int cpu);
105#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ 81#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
106static inline void vtime_account_irq_exit(struct task_struct *tsk)
107{
108 /* On hard|softirq exit we always account to hard|softirq cputime */
109 vtime_account_system(tsk);
110}
111static inline void vtime_user_enter(struct task_struct *tsk) { } 82static inline void vtime_user_enter(struct task_struct *tsk) { }
112static inline void vtime_user_exit(struct task_struct *tsk) { } 83static inline void vtime_user_exit(struct task_struct *tsk) { }
113static inline void vtime_guest_enter(struct task_struct *tsk) { } 84static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -115,6 +86,19 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { }
115static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } 86static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
116#endif 87#endif
117 88
89#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
90extern void vtime_account_irq_enter(struct task_struct *tsk);
91static inline void vtime_account_irq_exit(struct task_struct *tsk)
92{
93 /* On hard|softirq exit we always account to hard|softirq cputime */
94 vtime_account_system(tsk);
95}
96#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
97static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
98static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
99#endif
100
101
118#ifdef CONFIG_IRQ_TIME_ACCOUNTING 102#ifdef CONFIG_IRQ_TIME_ACCOUNTING
119extern void irqtime_account_irq(struct task_struct *tsk); 103extern void irqtime_account_irq(struct task_struct *tsk);
120#else 104#else
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 98a938aabdfb..7a8d6037a4bb 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * v4l2-mc.h - Media Controller V4L2 types and prototypes 2 * v4l2-mc.h - Media Controller V4L2 types and prototypes
3 * 3 *
4 * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4 * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
5 * Copyright (C) 2006-2010 Nokia Corporation 5 * Copyright (C) 2006-2010 Nokia Corporation
6 * Copyright (c) 2016 Intel Corporation. 6 * Copyright (c) 2016 Intel Corporation.
7 * 7 *
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 791800ddd6d9..6360c259da6d 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -34,6 +34,9 @@
34 34
35#define BOND_DEFAULT_MIIMON 100 35#define BOND_DEFAULT_MIIMON 100
36 36
37#ifndef __long_aligned
38#define __long_aligned __attribute__((aligned((sizeof(long)))))
39#endif
37/* 40/*
38 * Less bad way to call ioctl from within the kernel; this needs to be 41 * Less bad way to call ioctl from within the kernel; this needs to be
39 * done some other way to get the call out of interrupt context. 42 * done some other way to get the call out of interrupt context.
@@ -138,7 +141,9 @@ struct bond_params {
138 struct reciprocal_value reciprocal_packets_per_slave; 141 struct reciprocal_value reciprocal_packets_per_slave;
139 u16 ad_actor_sys_prio; 142 u16 ad_actor_sys_prio;
140 u16 ad_user_port_key; 143 u16 ad_user_port_key;
141 u8 ad_actor_system[ETH_ALEN]; 144
145 /* 2 bytes of padding : see ether_addr_equal_64bits() */
146 u8 ad_actor_system[ETH_ALEN + 2];
142}; 147};
143 148
144struct bond_parm_tbl { 149struct bond_parm_tbl {
diff --git a/include/net/compat.h b/include/net/compat.h
index 48103cf94e97..13de0ccaa059 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
42 42
43int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, 43int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
44 struct sockaddr __user **, struct iovec **); 44 struct sockaddr __user **, struct iovec **);
45struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
45asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, 46asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
46 unsigned int); 47 unsigned int);
47asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, 48asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
diff --git a/include/net/gre.h b/include/net/gre.h
index 5dce30a6abe3..7a54a31d1d4c 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -26,7 +26,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
26struct net_device *gretap_fb_dev_create(struct net *net, const char *name, 26struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
27 u8 name_assign_type); 27 u8 name_assign_type);
28int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, 28int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
29 bool *csum_err, __be16 proto); 29 bool *csum_err, __be16 proto, int nhs);
30 30
31static inline int gre_calc_hlen(__be16 o_flags) 31static inline int gre_calc_hlen(__be16 o_flags)
32{ 32{
diff --git a/include/net/ip.h b/include/net/ip.h
index 37165fba3741..08f36cd2b874 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
313 return min(dst->dev->mtu, IP_MAX_MTU); 313 return min(dst->dev->mtu, IP_MAX_MTU);
314} 314}
315 315
316static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) 316static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
317 const struct sk_buff *skb)
317{ 318{
318 struct sock *sk = skb->sk;
319
320 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { 319 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
321 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; 320 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
322 321
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d325c81332e3..43a5a0e4524c 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops {
63 u8 *protocol, struct flowi6 *fl6); 63 u8 *protocol, struct flowi6 *fl6);
64}; 64};
65 65
66#ifdef CONFIG_INET
67
66extern const struct ip6_tnl_encap_ops __rcu * 68extern const struct ip6_tnl_encap_ops __rcu *
67 ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; 69 ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
68 70
@@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev);
138int ip6_tnl_get_iflink(const struct net_device *dev); 140int ip6_tnl_get_iflink(const struct net_device *dev);
139int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); 141int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
140 142
141#ifdef CONFIG_INET
142static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, 143static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
143 struct net_device *dev) 144 struct net_device *dev)
144{ 145{
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index af4c10ebb241..cd6018a9ee24 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
1232const char *ip_vs_state_name(__u16 proto, int state); 1232const char *ip_vs_state_name(__u16 proto, int state);
1233 1233
1234void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); 1234void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
1235int ip_vs_check_template(struct ip_vs_conn *ct); 1235int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
1236void ip_vs_random_dropentry(struct netns_ipvs *ipvs); 1236void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
1237int ip_vs_conn_init(void); 1237int ip_vs_conn_init(void);
1238void ip_vs_conn_cleanup(void); 1238void ip_vs_conn_cleanup(void);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index dd78bea227c8..b6083c34ef0d 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -284,6 +284,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
284 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; 284 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
285} 285}
286 286
287/* jiffies until ct expires, 0 if already expired */
288static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
289{
290 long timeout = (long)ct->timeout.expires - (long)jiffies;
291
292 return timeout > 0 ? timeout : 0;
293}
294
287struct kernel_param; 295struct kernel_param;
288 296
289int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 297int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 9c5638ad872e..0dbce55437f2 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,8 +28,8 @@ struct nf_queue_handler {
28 struct nf_hook_ops *ops); 28 struct nf_hook_ops *ops);
29}; 29};
30 30
31void nf_register_queue_handler(const struct nf_queue_handler *qh); 31void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
32void nf_unregister_queue_handler(void); 32void nf_unregister_queue_handler(struct net *net);
33void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); 33void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
34 34
35void nf_queue_entry_get_refs(struct nf_queue_entry *entry); 35void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 092235458691..f7c291ff4074 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -167,6 +167,7 @@ struct nft_set_elem {
167 167
168struct nft_set; 168struct nft_set;
169struct nft_set_iter { 169struct nft_set_iter {
170 u8 genmask;
170 unsigned int count; 171 unsigned int count;
171 unsigned int skip; 172 unsigned int skip;
172 int err; 173 int err;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 38aa4983e2a9..36d723579af2 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -5,11 +5,13 @@
5 5
6struct proc_dir_entry; 6struct proc_dir_entry;
7struct nf_logger; 7struct nf_logger;
8struct nf_queue_handler;
8 9
9struct netns_nf { 10struct netns_nf {
10#if defined CONFIG_PROC_FS 11#if defined CONFIG_PROC_FS
11 struct proc_dir_entry *proc_netfilter; 12 struct proc_dir_entry *proc_netfilter;
12#endif 13#endif
14 const struct nf_queue_handler __rcu *queue_handler;
13 const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; 15 const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
14#ifdef CONFIG_SYSCTL 16#ifdef CONFIG_SYSCTL
15 struct ctl_table_header *nf_log_dir_header; 17 struct ctl_table_header *nf_log_dir_header;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 0f7efa88f210..3722dda0199d 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,16 +392,20 @@ struct tc_cls_u32_offload {
392 }; 392 };
393}; 393};
394 394
395static inline bool tc_should_offload(struct net_device *dev, u32 flags) 395static inline bool tc_should_offload(const struct net_device *dev,
396 const struct tcf_proto *tp, u32 flags)
396{ 397{
398 const struct Qdisc *sch = tp->q;
399 const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
400
397 if (!(dev->features & NETIF_F_HW_TC)) 401 if (!(dev->features & NETIF_F_HW_TC))
398 return false; 402 return false;
399
400 if (flags & TCA_CLS_FLAGS_SKIP_HW) 403 if (flags & TCA_CLS_FLAGS_SKIP_HW)
401 return false; 404 return false;
402
403 if (!dev->netdev_ops->ndo_setup_tc) 405 if (!dev->netdev_ops->ndo_setup_tc)
404 return false; 406 return false;
407 if (cops && cops->tcf_cl_offload)
408 return cops->tcf_cl_offload(tp->classid);
405 409
406 return true; 410 return true;
407} 411}
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 401038d2f9b8..fea53f4d92ca 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
61} 61}
62 62
63struct qdisc_watchdog { 63struct qdisc_watchdog {
64 u64 last_expires;
64 struct hrtimer timer; 65 struct hrtimer timer;
65 struct Qdisc *qdisc; 66 struct Qdisc *qdisc;
66}; 67};
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a1fd76c22a59..62d553184e91 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -168,6 +168,7 @@ struct Qdisc_class_ops {
168 168
169 /* Filter manipulation */ 169 /* Filter manipulation */
170 struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); 170 struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
171 bool (*tcf_cl_offload)(u32 classid);
171 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, 172 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
172 u32 classid); 173 u32 classid);
173 void (*unbind_tcf)(struct Qdisc *, unsigned long); 174 void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
691 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ 692 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
692 if (!sch->gso_skb) { 693 if (!sch->gso_skb) {
693 sch->gso_skb = sch->dequeue(sch); 694 sch->gso_skb = sch->dequeue(sch);
694 if (sch->gso_skb) 695 if (sch->gso_skb) {
695 /* it's still part of the queue */ 696 /* it's still part of the queue */
697 qdisc_qstats_backlog_inc(sch, sch->gso_skb);
696 sch->q.qlen++; 698 sch->q.qlen++;
699 }
697 } 700 }
698 701
699 return sch->gso_skb; 702 return sch->gso_skb;
@@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
706 709
707 if (skb) { 710 if (skb) {
708 sch->gso_skb = NULL; 711 sch->gso_skb = NULL;
712 qdisc_qstats_backlog_dec(sch, skb);
709 sch->q.qlen--; 713 sch->q.qlen--;
710 } else { 714 } else {
711 skb = sch->dequeue(sch); 715 skb = sch->dequeue(sch);
diff --git a/include/net/sock.h b/include/net/sock.h
index 649d2a8c17fc..ff5be7e8ddea 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1576,7 +1576,13 @@ static inline void sock_put(struct sock *sk)
1576 */ 1576 */
1577void sock_gen_put(struct sock *sk); 1577void sock_gen_put(struct sock *sk);
1578 1578
1579int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested); 1579int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1580 unsigned int trim_cap);
1581static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1582 const int nested)
1583{
1584 return __sk_receive_skb(sk, skb, nested, 1);
1585}
1580 1586
1581static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) 1587static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1582{ 1588{
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 985619a59323..1d8e158241da 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -60,7 +60,7 @@ struct switchdev_attr {
60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ 60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
61 u8 stp_state; /* PORT_STP_STATE */ 61 u8 stp_state; /* PORT_STP_STATE */
62 unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ 62 unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
63 u32 ageing_time; /* BRIDGE_AGEING_TIME */ 63 clock_t ageing_time; /* BRIDGE_AGEING_TIME */
64 bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ 64 bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
65 } u; 65 } u;
66}; 66};
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index dc9a09aefb33..c55facd17b7e 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -36,7 +36,7 @@ struct tcf_meta_ops {
36 int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *); 36 int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
37 int (*decode)(struct sk_buff *, void *, u16 len); 37 int (*decode)(struct sk_buff *, void *, u16 len);
38 int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi); 38 int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
39 int (*alloc)(struct tcf_meta_info *, void *); 39 int (*alloc)(struct tcf_meta_info *, void *, gfp_t);
40 void (*release)(struct tcf_meta_info *); 40 void (*release)(struct tcf_meta_info *);
41 int (*validate)(void *val, int len); 41 int (*validate)(void *val, int len);
42 struct module *owner; 42 struct module *owner;
@@ -48,8 +48,8 @@ int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
48int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi); 48int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
49int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, 49int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
50 const void *dval); 50 const void *dval);
51int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval); 51int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
52int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval); 52int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
53int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi); 53int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
54int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi); 54int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
55int ife_validate_meta_u32(void *val, int len); 55int ife_validate_meta_u32(void *val, int len);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 432bed510369..7e440d41487a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -217,10 +217,10 @@ enum ib_device_cap_flags {
217 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 217 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
218 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 218 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
219 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 219 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
220 IB_DEVICE_ON_DEMAND_PAGING = (1 << 31), 220 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
221 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 221 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
222 IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33), 222 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
223 IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34), 223 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
224}; 224};
225 225
226enum ib_signature_prot_cap { 226enum ib_signature_prot_cap {
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 16274e2133cd..9c9a27d42aaa 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -203,7 +203,9 @@ struct rvt_driver_provided {
203 203
204 /* 204 /*
205 * Allocate a private queue pair data structure for driver specific 205 * Allocate a private queue pair data structure for driver specific
206 * information which is opaque to rdmavt. 206 * information which is opaque to rdmavt. Errors are returned via
207 * ERR_PTR(err). The driver is free to return NULL or a valid
208 * pointer.
207 */ 209 */
208 void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp, 210 void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
209 gfp_t gfp); 211 gfp_t gfp);
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8bdae34d1f9a..ec10cfef166a 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -245,6 +245,7 @@ endif
245header-y += hw_breakpoint.h 245header-y += hw_breakpoint.h
246header-y += l2tp.h 246header-y += l2tp.h
247header-y += libc-compat.h 247header-y += libc-compat.h
248header-y += lirc.h
248header-y += limits.h 249header-y += limits.h
249header-y += llc.h 250header-y += llc.h
250header-y += loop.h 251header-y += loop.h
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 23c6960e94a4..2bdd1e3e7007 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 {
118 }; 118 };
119 union { 119 union {
120 char name[BTRFS_SUBVOL_NAME_MAX + 1]; 120 char name[BTRFS_SUBVOL_NAME_MAX + 1];
121 u64 devid; 121 __u64 devid;
122 }; 122 };
123}; 123};
124 124
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 9222db8ccccc..5f030b46cff4 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices {
1353 ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, 1353 ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
1354 ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, 1354 ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
1355 ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, 1355 ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
1356 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
1357 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
1358 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
1359 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
1360 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
1361 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36,
1362 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
1363 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
1364 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
1356 1365
1357 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1366 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
1358 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* 1367 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices {
1361 */ 1370 */
1362 1371
1363 __ETHTOOL_LINK_MODE_LAST 1372 __ETHTOOL_LINK_MODE_LAST
1364 = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 1373 = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1365}; 1374};
1366 1375
1367#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ 1376#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 5974fae54e12..27e17363263a 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -105,6 +105,9 @@
105 * 105 *
106 * 7.24 106 * 7.24
107 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support 107 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
108 *
109 * 7.25
110 * - add FUSE_PARALLEL_DIROPS
108 */ 111 */
109 112
110#ifndef _LINUX_FUSE_H 113#ifndef _LINUX_FUSE_H
@@ -140,7 +143,7 @@
140#define FUSE_KERNEL_VERSION 7 143#define FUSE_KERNEL_VERSION 7
141 144
142/** Minor version number of this interface */ 145/** Minor version number of this interface */
143#define FUSE_KERNEL_MINOR_VERSION 24 146#define FUSE_KERNEL_MINOR_VERSION 25
144 147
145/** The node ID of the root inode */ 148/** The node ID of the root inode */
146#define FUSE_ROOT_ID 1 149#define FUSE_ROOT_ID 1
@@ -234,6 +237,7 @@ struct fuse_file_lock {
234 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 237 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
235 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 238 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
236 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens 239 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
240 * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
237 */ 241 */
238#define FUSE_ASYNC_READ (1 << 0) 242#define FUSE_ASYNC_READ (1 << 0)
239#define FUSE_POSIX_LOCKS (1 << 1) 243#define FUSE_POSIX_LOCKS (1 << 1)
@@ -253,6 +257,7 @@ struct fuse_file_lock {
253#define FUSE_ASYNC_DIO (1 << 15) 257#define FUSE_ASYNC_DIO (1 << 15)
254#define FUSE_WRITEBACK_CACHE (1 << 16) 258#define FUSE_WRITEBACK_CACHE (1 << 16)
255#define FUSE_NO_OPEN_SUPPORT (1 << 17) 259#define FUSE_NO_OPEN_SUPPORT (1 << 17)
260#define FUSE_PARALLEL_DIROPS (1 << 18)
256 261
257/** 262/**
258 * CUSE INIT request/reply flags 263 * CUSE INIT request/reply flags
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index ca1054dd8249..72a04a0e8cce 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -1,5 +1,5 @@
1#ifndef _UAPI_LINUX_GTP_H_ 1#ifndef _UAPI_LINUX_GTP_H_
2#define _UAPI_LINUX_GTP_H__ 2#define _UAPI_LINUX_GTP_H_
3 3
4enum gtp_genl_cmds { 4enum gtp_genl_cmds {
5 GTP_CMD_NEWPDP, 5 GTP_CMD_NEWPDP,
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index b0916fc72cce..22e5e589a274 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -39,6 +39,7 @@ enum iio_chan_type {
39 IIO_RESISTANCE, 39 IIO_RESISTANCE,
40 IIO_PH, 40 IIO_PH,
41 IIO_UVINDEX, 41 IIO_UVINDEX,
42 IIO_ELECTRICALCONDUCTIVITY,
42}; 43};
43 44
44enum iio_modifier { 45enum iio_modifier {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 87cf351bab03..d6d071fc3c56 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -611,6 +611,37 @@
611#define KEY_KBDINPUTASSIST_ACCEPT 0x264 611#define KEY_KBDINPUTASSIST_ACCEPT 0x264
612#define KEY_KBDINPUTASSIST_CANCEL 0x265 612#define KEY_KBDINPUTASSIST_CANCEL 0x265
613 613
614/* Diagonal movement keys */
615#define KEY_RIGHT_UP 0x266
616#define KEY_RIGHT_DOWN 0x267
617#define KEY_LEFT_UP 0x268
618#define KEY_LEFT_DOWN 0x269
619
620#define KEY_ROOT_MENU 0x26a /* Show Device's Root Menu */
621/* Show Top Menu of the Media (e.g. DVD) */
622#define KEY_MEDIA_TOP_MENU 0x26b
623#define KEY_NUMERIC_11 0x26c
624#define KEY_NUMERIC_12 0x26d
625/*
626 * Toggle Audio Description: refers to an audio service that helps blind and
627 * visually impaired consumers understand the action in a program. Note: in
628 * some countries this is referred to as "Video Description".
629 */
630#define KEY_AUDIO_DESC 0x26e
631#define KEY_3D_MODE 0x26f
632#define KEY_NEXT_FAVORITE 0x270
633#define KEY_STOP_RECORD 0x271
634#define KEY_PAUSE_RECORD 0x272
635#define KEY_VOD 0x273 /* Video on Demand */
636#define KEY_UNMUTE 0x274
637#define KEY_FASTREVERSE 0x275
638#define KEY_SLOWREVERSE 0x276
639/*
640 * Control a data application associated with the currently viewed channel,
641 * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
642 */
 643#define KEY_DATA 0x277
644
614#define BTN_TRIGGER_HAPPY 0x2c0 645#define BTN_TRIGGER_HAPPY 0x2c0
615#define BTN_TRIGGER_HAPPY1 0x2c0 646#define BTN_TRIGGER_HAPPY1 0x2c0
616#define BTN_TRIGGER_HAPPY2 0x2c1 647#define BTN_TRIGGER_HAPPY2 0x2c1
@@ -749,6 +780,7 @@
749#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ 780#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
750#define SW_LINEIN_INSERT 0x0d /* set = inserted */ 781#define SW_LINEIN_INSERT 0x0d /* set = inserted */
751#define SW_MUTE_DEVICE 0x0e /* set = device disabled */ 782#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
783#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
752#define SW_MAX 0x0f 784#define SW_MAX 0x0f
753#define SW_CNT (SW_MAX+1) 785#define SW_CNT (SW_MAX+1)
754 786
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 01113841190d..c51494119817 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -247,6 +247,7 @@ struct input_mask {
247#define BUS_ATARI 0x1B 247#define BUS_ATARI 0x1B
248#define BUS_SPI 0x1C 248#define BUS_SPI 0x1C
249#define BUS_RMI 0x1D 249#define BUS_RMI 0x1D
250#define BUS_CEC 0x1E
250 251
251/* 252/*
252 * MT_TOOL types 253 * MT_TOOL types
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 1d973d2ba417..cd26d7a0fd07 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -33,6 +33,7 @@ header-y += xt_NFLOG.h
33header-y += xt_NFQUEUE.h 33header-y += xt_NFQUEUE.h
34header-y += xt_RATEEST.h 34header-y += xt_RATEEST.h
35header-y += xt_SECMARK.h 35header-y += xt_SECMARK.h
36header-y += xt_SYNPROXY.h
36header-y += xt_TCPMSS.h 37header-y += xt_TCPMSS.h
37header-y += xt_TCPOPTSTRIP.h 38header-y += xt_TCPOPTSTRIP.h
38header-y += xt_TEE.h 39header-y += xt_TEE.h
diff --git a/include/uapi/linux/netfilter/xt_SYNPROXY.h b/include/uapi/linux/netfilter/xt_SYNPROXY.h
index 2d59fbaa93c6..ca67e61d2a61 100644
--- a/include/uapi/linux/netfilter/xt_SYNPROXY.h
+++ b/include/uapi/linux/netfilter/xt_SYNPROXY.h
@@ -1,6 +1,8 @@
1#ifndef _XT_SYNPROXY_H 1#ifndef _XT_SYNPROXY_H
2#define _XT_SYNPROXY_H 2#define _XT_SYNPROXY_H
3 3
4#include <linux/types.h>
5
4#define XT_SYNPROXY_OPT_MSS 0x01 6#define XT_SYNPROXY_OPT_MSS 0x01
5#define XT_SYNPROXY_OPT_WSCALE 0x02 7#define XT_SYNPROXY_OPT_WSCALE 0x02
6#define XT_SYNPROXY_OPT_SACK_PERM 0x04 8#define XT_SYNPROXY_OPT_SACK_PERM 0x04
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 36ce552cf6a9..c66a485a24ac 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -276,6 +276,9 @@ enum perf_event_read_format {
276 276
277/* 277/*
278 * Hardware event_id to monitor via a performance monitoring event: 278 * Hardware event_id to monitor via a performance monitoring event:
279 *
280 * @sample_max_stack: Max number of frame pointers in a callchain,
281 * should be < /proc/sys/kernel/perf_event_max_stack
279 */ 282 */
280struct perf_event_attr { 283struct perf_event_attr {
281 284
@@ -385,7 +388,8 @@ struct perf_event_attr {
385 * Wakeup watermark for AUX area 388 * Wakeup watermark for AUX area
386 */ 389 */
387 __u32 aux_watermark; 390 __u32 aux_watermark;
388 __u32 __reserved_2; /* align to __u64 */ 391 __u16 sample_max_stack;
392 __u16 __reserved_2; /* align to __u64 */
389}; 393};
390 394
391#define perf_flags(attr) (*(&(attr)->read_format + 1)) 395#define perf_flags(attr) (*(&(attr)->read_format + 1))
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index eba5914ba5d1..f4297c8a42fe 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -145,6 +145,8 @@ enum {
145 TCA_POLICE_PEAKRATE, 145 TCA_POLICE_PEAKRATE,
146 TCA_POLICE_AVRATE, 146 TCA_POLICE_AVRATE,
147 TCA_POLICE_RESULT, 147 TCA_POLICE_RESULT,
148 TCA_POLICE_TM,
149 TCA_POLICE_PAD,
148 __TCA_POLICE_MAX 150 __TCA_POLICE_MAX
149#define TCA_POLICE_RESULT TCA_POLICE_RESULT 151#define TCA_POLICE_RESULT TCA_POLICE_RESULT
150}; 152};
@@ -173,7 +175,7 @@ enum {
173 TCA_U32_DIVISOR, 175 TCA_U32_DIVISOR,
174 TCA_U32_SEL, 176 TCA_U32_SEL,
175 TCA_U32_POLICE, 177 TCA_U32_POLICE,
176 TCA_U32_ACT, 178 TCA_U32_ACT,
177 TCA_U32_INDEV, 179 TCA_U32_INDEV,
178 TCA_U32_PCNT, 180 TCA_U32_PCNT,
179 TCA_U32_MARK, 181 TCA_U32_MARK,
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index a7f27704f980..691984cb0b91 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -1,5 +1,6 @@
1# UAPI Header export list 1# UAPI Header export list
2header-y += asequencer.h 2header-y += asequencer.h
3header-y += asoc.h
3header-y += asound.h 4header-y += asound.h
4header-y += asound_fm.h 5header-y += asound_fm.h
5header-y += compress_offload.h 6header-y += compress_offload.h
@@ -10,3 +11,5 @@ header-y += hdsp.h
10header-y += hdspm.h 11header-y += hdspm.h
11header-y += sb16_csp.h 12header-y += sb16_csp.h
12header-y += sfnt_info.h 13header-y += sfnt_info.h
14header-y += tlv.h
15header-y += usb_stream.h