Diffstat (limited to 'include')
-rw-r--r--  include/crypto/internal/hash.h                                |   8
-rw-r--r--  include/drm/drm_connector.h                                   |  10
-rw-r--r--  include/drm/drm_edid.h                                        |   2
-rw-r--r--  include/drm/drm_mode_config.h                                 |  18
-rw-r--r--  include/linux/compiler.h                                      |  47
-rw-r--r--  include/linux/completion.h                                    |  45
-rw-r--r--  include/linux/cred.h                                          |   1
-rw-r--r--  include/linux/idr.h                                           |   1
-rw-r--r--  include/linux/intel-pti.h (renamed from include/linux/pti.h)  |   6
-rw-r--r--  include/linux/lockdep.h                                       | 125
-rw-r--r--  include/linux/oom.h                                           |   9
-rw-r--r--  include/linux/pci.h                                           |   3
-rw-r--r--  include/linux/pm.h                                            |   1
-rw-r--r--  include/linux/ptr_ring.h                                      |   9
-rw-r--r--  include/linux/rbtree.h                                        |   2
-rw-r--r--  include/linux/rwlock_types.h                                  |   3
-rw-r--r--  include/linux/sched.h                                         |  17
-rw-r--r--  include/linux/sched/coredump.h                                |   1
-rw-r--r--  include/linux/spinlock.h                                      |   5
-rw-r--r--  include/linux/spinlock_types.h                                |   3
-rw-r--r--  include/linux/string.h                                        |   5
-rw-r--r--  include/linux/tee_drv.h                                       |  12
-rw-r--r--  include/linux/trace.h                                         |   2
-rw-r--r--  include/net/gue.h                                             |  18
-rw-r--r--  include/net/ip.h                                              |   1
-rw-r--r--  include/net/sch_generic.h                                     |   1
-rw-r--r--  include/trace/events/preemptirq.h                             |  11
-rw-r--r--  include/uapi/linux/pkt_sched.h                                |   1
-rw-r--r--  include/uapi/linux/rtnetlink.h                                |   1
-rw-r--r--  include/uapi/linux/tee.h                                      |   7
30 files changed, 122 insertions, 253 deletions
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index f0b44c16e88f..c2bae8da642c 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
 				  struct ahash_instance *inst);
 void ahash_free_instance(struct crypto_instance *inst);
 
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+		    unsigned int keylen);
+
+static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+	return alg->setkey != shash_no_setkey;
+}
+
 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
 			    struct hash_alg_common *alg,
 			    struct crypto_instance *inst);
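The new crypto_shash_alg_has_setkey() helper lets callers distinguish algorithms that really implement ->setkey from those using the shash_no_setkey() stub. A minimal usage sketch (not part of the patch; example_require_setkey() is hypothetical):

	#include <linux/errno.h>
	#include <crypto/internal/hash.h>

	/* Reject algorithms whose ->setkey is the no-op stub. */
	static int example_require_setkey(struct shash_alg *salg)
	{
		if (!crypto_shash_alg_has_setkey(salg))
			return -EINVAL;	/* keys would be silently ignored */
		return 0;
	}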
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index a4649c56ca2f..5971577016a2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -24,6 +24,7 @@
 #define __DRM_CONNECTOR_H__
 
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/ctype.h>
 #include <linux/hdmi.h>
 #include <drm/drm_mode_object.h>
@@ -918,12 +919,13 @@ struct drm_connector {
 	uint16_t tile_h_size, tile_v_size;
 
 	/**
-	 * @free_work:
+	 * @free_node:
 	 *
-	 * Work used only by &drm_connector_iter to be able to clean up a
-	 * connector from any context.
+	 * List used only by &drm_connector_iter to be able to clean up a
+	 * connector from any context, in conjunction with
+	 * &drm_mode_config.connector_free_work.
 	 */
-	struct work_struct free_work;
+	struct llist_node free_node;
 };
 
 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 2ec41d032e56..efe6d5a8e834 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -465,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
 				     struct i2c_adapter *adapter);
 struct edid *drm_edid_duplicate(const struct edid *edid);
+void drm_reset_display_info(struct drm_connector *connector);
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index b21e827c5c78..b0ce26d71296 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
+#include <linux/llist.h>
 
 #include <drm/drm_modeset_lock.h>
 
@@ -393,7 +394,7 @@ struct drm_mode_config {
 
 	/**
 	 * @connector_list_lock: Protects @num_connector and
-	 * @connector_list.
+	 * @connector_list and @connector_free_list.
 	 */
 	spinlock_t connector_list_lock;
 	/**
@@ -414,6 +415,21 @@ struct drm_mode_config {
 	 */
 	struct list_head connector_list;
 	/**
+	 * @connector_free_list:
+	 *
+	 * List of connector objects linked with &drm_connector.free_head.
+	 * Protected by @connector_list_lock. Used by
+	 * drm_for_each_connector_iter() and
+	 * &struct drm_connector_list_iter to savely free connectors using
+	 * @connector_free_work.
+	 */
+	struct llist_head connector_free_list;
+	/**
+	 * @connector_free_work: Work to clean up @connector_free_list.
+	 */
+	struct work_struct connector_free_work;
+
+	/**
 	 * @num_encoder:
 	 *
 	 * Number of encoders on this device. This is invariant over the
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 188ed9f65517..52e611ab9a6c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering. One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
- * least two memcpy()s: one for the __builtin_memcpy() and then one for
- * the macro doing the copy of variable - '__u' allocated on the stack.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy(). There's at least two memcpy()s: one for the
+ * __builtin_memcpy() and then one for the macro doing the copy of variable
+ * - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 	compiletime_assert(__native_word(t),				\
 		"Need native word sized stores/loads for atomicity.")
 
-/*
- * Prevent the compiler from merging or refetching accesses. The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
- * but only when the compiler is aware of some particular ordering. One way
- * to make the compiler aware of ordering is to put the two invocations of
- * ACCESS_ONCE() in different C statements.
- *
- * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
- * on a union member will work as long as the size of the member matches the
- * size of the union and the size is smaller than word size.
- *
- * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
- * between process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- *
- * If possible use READ_ONCE()/WRITE_ONCE() instead.
- */
-#define __ACCESS_ONCE(x) ({ \
-	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
-	(volatile typeof(x) *)&(x); })
-#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
-
 #endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 0662a417febe..94a59ba7d422 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,9 +10,6 @@
  */
 
 #include <linux/wait.h>
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#include <linux/lockdep.h>
-#endif
 
 /*
  * struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,16 @@
 struct completion {
 	unsigned int done;
 	wait_queue_head_t wait;
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-	struct lockdep_map_cross map;
-#endif
 };
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-static inline void complete_acquire(struct completion *x)
-{
-	lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
-}
-
-static inline void complete_release(struct completion *x)
-{
-	lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
-}
-
-static inline void complete_release_commit(struct completion *x)
-{
-	lock_commit_crosslock((struct lockdep_map *)&x->map);
-}
-
-#define init_completion_map(x, m)					\
-do {									\
-	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\
-			(m)->name, (m)->key, 0);			\
-	__init_completion(x);						\
-} while (0)
-
-#define init_completion(x)						\
-do {									\
-	static struct lock_class_key __key;				\
-	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\
-			"(completion)" #x,				\
-			&__key, 0);					\
-	__init_completion(x);						\
-} while (0)
-#else
 #define init_completion_map(x, m) __init_completion(x)
 #define init_completion(x) __init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
 static inline void complete_release_commit(struct completion *x) {}
-#endif
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#define COMPLETION_INITIALIZER(work) \
-	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
-	STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
-#else
 #define COMPLETION_INITIALIZER(work) \
 	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#endif
 
 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
 	(*({ init_completion_map(&(work), &(map)); &(work); }))
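With the crossrelease variants removed, completion setup is unconditional again; a minimal sketch of the surviving API (example_wait() is illustrative, not from the patch):

	#include <linux/completion.h>

	static void example_wait(void)
	{
		struct completion done;

		init_completion(&done);	/* no lockdep_map_cross behind it any more */
		/* hand &done to another context that calls complete(&done) */
		wait_for_completion(&done);
	}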
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 099058e1178b..631286535d0f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
 extern void set_groups(struct cred *, struct group_info *);
 extern int groups_search(const struct group_info *, kgid_t);
 extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
 
 /*
  * The security context of a task
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 7c3a365f7e12..fa14f834e4ed 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/bug.h>
 
 struct idr {
 	struct radix_tree_root	idr_rt;
diff --git a/include/linux/pti.h b/include/linux/intel-pti.h
index b3ea01a3197e..2710d72de3c9 100644
--- a/include/linux/pti.h
+++ b/include/linux/intel-pti.h
@@ -22,8 +22,8 @@
  * interface to write out it's contents for debugging a mobile system.
  */
 
-#ifndef PTI_H_
-#define PTI_H_
+#ifndef LINUX_INTEL_PTI_H_
+#define LINUX_INTEL_PTI_H_
 
 /* offset for last dword of any PTI message. Part of MIPI P1149.7 */
 #define PTI_LASTDWORD_DTS	0x30
@@ -40,4 +40,4 @@ struct pti_masterchannel *pti_request_masterchannel(u8 type,
 						    const char *thread_name);
 void pti_release_masterchannel(struct pti_masterchannel *mc);
 
-#endif /*PTI_H_*/
+#endif /* LINUX_INTEL_PTI_H_ */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index a842551fe044..2e75dc34bff5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -158,12 +158,6 @@ struct lockdep_map {
 	int				cpu;
 	unsigned long			ip;
 #endif
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Whether it's a crosslock.
-	 */
-	int				cross;
-#endif
 };
 
 static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,96 +261,9 @@ struct held_lock {
 	unsigned int hardirqs_off:1;
 	unsigned int references:12;	/* 32 bits */
 	unsigned int pin_count;
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Generation id.
-	 *
-	 * A value of cross_gen_id will be stored when holding this,
-	 * which is globally increased whenever each crosslock is held.
-	 */
-	unsigned int gen_id;
-#endif
-};
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCK_TRACE_ENTRIES 5
-
-/*
- * This is for keeping locks waiting for commit so that true dependencies
- * can be added at commit step.
- */
-struct hist_lock {
-	/*
-	 * Id for each entry in the ring buffer. This is used to
-	 * decide whether the ring buffer was overwritten or not.
-	 *
-	 * For example,
-	 *
-	 *           |<----------- hist_lock ring buffer size ------->|
-	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-	 *
-	 *           where 'p' represents an acquisition in process
-	 *           context, 'i' represents an acquisition in irq
-	 *           context.
-	 *
-	 * In this example, the ring buffer was overwritten by
-	 * acquisitions in irq context, that should be detected on
-	 * rollback or commit.
-	 */
-	unsigned int hist_id;
-
-	/*
-	 * Seperate stack_trace data. This will be used at commit step.
-	 */
-	struct stack_trace	trace;
-	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock	hlock;
 };
 
 /*
- * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
- * be called instead of lockdep_init_map().
- */
-struct cross_lock {
-	/*
-	 * When more than one acquisition of crosslocks are overlapped,
-	 * we have to perform commit for them based on cross_gen_id of
-	 * the first acquisition, which allows us to add more true
-	 * dependencies.
-	 *
-	 * Moreover, when no acquisition of a crosslock is in progress,
-	 * we should not perform commit because the lock might not exist
-	 * any more, which might cause incorrect memory access. So we
-	 * have to track the number of acquisitions of a crosslock.
-	 */
-	int nr_acquire;
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock	hlock;
-};
-
-struct lockdep_map_cross {
-	struct lockdep_map map;
-	struct cross_lock xlock;
-};
-#endif
-
-/*
  * Initialization, self-test and debugging-output methods:
  */
 extern void lockdep_info(void);
@@ -560,37 +467,6 @@ enum xhlock_context_t {
 	XHLOCK_CTX_NR,
 };
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
-				       const char *name,
-				       struct lock_class_key *key,
-				       int subclass);
-extern void lock_commit_crosslock(struct lockdep_map *lock);
-
-/*
- * What we essencially have to initialize is 'nr_acquire'. Other members
- * will be initialized in add_xlock().
- */
-#define STATIC_CROSS_LOCK_INIT() \
-	{ .nr_acquire = 0,}
-
-#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .map.name = (_name), .map.key = (void *)(_key), \
-	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
-
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .name = (_name), .key = (void *)(_key), .cross = 0, }
-
-extern void crossrelease_hist_start(enum xhlock_context_t c);
-extern void crossrelease_hist_end(enum xhlock_context_t c);
-extern void lockdep_invariant_state(bool force);
-extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_free_task(struct task_struct *task);
-#else /* !CROSSRELEASE */
 #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
 /*
  * To initialize a lockdep_map statically use this macro.
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
 static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
-#endif /* CROSSRELEASE */
 
 #ifdef CONFIG_LOCK_STAT
 
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 01c91d874a57..5bad038ac012 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
 }
 
 /*
+ * Use this helper if tsk->mm != mm and the victim mm needs a special
+ * handling. This is guaranteed to stay true after once set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+	return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
+/*
  * Checks whether a page fault on the given mm is still reliable.
  * This is no longer true if the oom reaper started to reap the
  * address space which is reflected by MMF_UNSTABLE flag set in
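A hedged sketch of the intended use (example_skip_reclaim() is hypothetical): since MMF_OOM_VICTIM is guaranteed to stay set once set, the bit can be tested without extra locking even when current->mm != mm:

	#include <linux/oom.h>

	static bool example_skip_reclaim(struct mm_struct *mm)
	{
		/* Stable after first observation; safe to test lock-free. */
		return mm_is_oom_victim(mm);
	}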
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0403894147a3..c170c9250c8b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
 						   unsigned int devfn)
 { return NULL; }
+static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
+					unsigned int bus, unsigned int devfn)
+{ return NULL; }
 
 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 65d39115f06d..492ed473ba7e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev);
 extern int pm_generic_poweroff(struct device *dev);
 extern void pm_generic_complete(struct device *dev);
 
+extern void dev_pm_skip_next_resume_phases(struct device *dev);
 extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
 
 #else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 37b4bb2545b3..6866df4f31b5 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
 
 /* Note: callers invoking this in a loop must use a compiler barrier,
  * for example cpu_relax(). Callers must hold producer_lock.
+ * Callers are responsible for making sure pointer that is being queued
+ * points to a valid data.
  */
 static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
 {
 	if (unlikely(!r->size) || r->queue[r->producer])
 		return -ENOSPC;
 
+	/* Make sure the pointer we are storing points to a valid data. */
+	/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
+	smp_wmb();
+
 	r->queue[r->producer++] = ptr;
 	if (unlikely(r->producer >= r->size))
 		r->producer = 0;
@@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
 	if (ptr)
 		__ptr_ring_discard_one(r);
 
+	/* Make sure anyone accessing data through the pointer is up to date. */
+	/* Pairs with smp_wmb in __ptr_ring_produce. */
+	smp_read_barrier_depends();
 	return ptr;
 }
 
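A sketch of the publish/consume ordering these barriers provide (illustrative only; the example_* functions are hypothetical, while ptr_ring_produce()/ptr_ring_consume() are the locked wrappers around the patched paths):

	#include <linux/ptr_ring.h>

	struct example_item { int payload; };

	static int example_publish(struct ptr_ring *r, struct example_item *it)
	{
		it->payload = 42;		/* ordered before publishing 'it'... */
		return ptr_ring_produce(r, it);	/* ...by the smp_wmb() inside */
	}

	static int example_fetch(struct ptr_ring *r)
	{
		struct example_item *it = ptr_ring_consume(r);

		/* smp_read_barrier_depends() already ran; the init is visible */
		return it ? it->payload : -1;
	}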
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index d574361943ea..fcbeed4053ef 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 			    struct rb_root *root);
 extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
 				struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+				   struct rb_root_cached *root);
 
 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
 				struct rb_node **rb_link)
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e93e36..857a72ceb794 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -10,9 +10,6 @@
  */
 typedef struct {
 	arch_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned int magic, owner_cpu;
 	void *owner;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21991d668d35..d2588263a989 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -849,17 +849,6 @@ struct task_struct {
 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
 #endif
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCKS_NR 64UL
-	struct hist_lock *xhlocks; /* Crossrelease history locks */
-	unsigned int xhlock_idx;
-	/* For restoring at history boundaries */
-	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
-	unsigned int hist_id;
-	/* For overwrite check at each context exit */
-	unsigned int hist_id_save[XHLOCK_CTX_NR];
-#endif
-
 #ifdef CONFIG_UBSAN
 	unsigned int			in_ubsan;
 #endif
@@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
 	__set_task_comm(tsk, from, false);
 }
 
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({			\
+	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
+	__get_task_comm(buf, sizeof(buf), tsk);		\
+})
 
 #ifdef CONFIG_SMP
 void scheduler_ipi(void);
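The macro form turns a mis-sized destination into a build failure instead of a silent truncation; a minimal sketch (example_print_comm() is hypothetical):

	#include <linux/printk.h>
	#include <linux/sched.h>

	static void example_print_comm(struct task_struct *tsk)
	{
		char comm[TASK_COMM_LEN];	/* any other size trips BUILD_BUG_ON */

		get_task_comm(comm, tsk);
		pr_info("comm: %s\n", comm);
	}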
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 9c8847395b5e..ec912d01126f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
 #define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
+#define MMF_OOM_VICTIM		25	/* mm is the oom victim */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a39186194cd6..3bf273538840 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -107,16 +107,11 @@ do { \
 
 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
 
-#ifdef CONFIG_GENERIC_LOCKBREAK
-#define raw_spin_is_contended(lock) ((lock)->break_lock)
-#else
-
 #ifdef arch_spin_is_contended
 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
 #else
 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
-#endif
 
 /*
  * This barrier must provide two things:
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb13a5d..24b4e6f2c1a2 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -19,9 +19,6 @@
19 19
20typedef struct raw_spinlock { 20typedef struct raw_spinlock {
21 arch_spinlock_t raw_lock; 21 arch_spinlock_t raw_lock;
22#ifdef CONFIG_GENERIC_LOCKBREAK
23 unsigned int break_lock;
24#endif
25#ifdef CONFIG_DEBUG_SPINLOCK 22#ifdef CONFIG_DEBUG_SPINLOCK
26 unsigned int magic, owner_cpu; 23 unsigned int magic, owner_cpu;
27 void *owner; 24 void *owner;
diff --git a/include/linux/string.h b/include/linux/string.h
index 410ecf17de3c..cfd83eb2f926 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
 {
 	__kernel_size_t ret;
 	size_t p_size = __builtin_object_size(p, 0);
-	if (p_size == (size_t)-1)
+
+	/* Work around gcc excess stack consumption issue */
+	if (p_size == (size_t)-1 ||
+	    (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
 		return __builtin_strlen(p);
 	ret = strnlen(p, p_size);
 	if (p_size <= ret)
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index a1d7f467657c..41bd4bded28c 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -452,4 +452,16 @@ static inline int tee_shm_get_id(struct tee_shm *shm)
  */
 struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
 
+static inline bool tee_param_is_memref(struct tee_param *param)
+{
+	switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+		return true;
+	default:
+		return false;
+	}
+}
+
 #endif /*__TEE_DRV_H*/
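A hedged sketch of how a driver might use the new helper (example_count_memrefs() is hypothetical): it classifies parameters without open-coding the three memref attribute values:

	#include <linux/tee_drv.h>

	static unsigned int example_count_memrefs(struct tee_param *params,
						  size_t num_params)
	{
		unsigned int n, count = 0;

		for (n = 0; n < num_params; n++)
			if (tee_param_is_memref(&params[n]))
				count++;	/* params[n].u.memref is meaningful */
		return count;
	}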
diff --git a/include/linux/trace.h b/include/linux/trace.h
index d24991c1fef3..b95ffb2188ab 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -18,7 +18,7 @@
  */
 struct trace_export {
 	struct trace_export __rcu	*next;
-	void (*write)(const void *, unsigned int);
+	void (*write)(struct trace_export *, const void *, unsigned int);
 };
 
 int register_ftrace_export(struct trace_export *export);
diff --git a/include/net/gue.h b/include/net/gue.h
index 2fdb29ca74c2..fdad41469b65 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -44,10 +44,10 @@ struct guehdr {
 #else
 #error  "Please fix <asm/byteorder.h>"
 #endif
 			__u8	proto_ctype;
-			__u16	flags;
+			__be16	flags;
 		};
-		__u32 word;
+		__be32	word;
 	};
 };
 
@@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags)
  * if there is an unknown standard or private flags, or the options length for
  * the flags exceeds the options length specific in hlen of the GUE header.
  */
-static inline int validate_gue_flags(struct guehdr *guehdr,
-				     size_t optlen)
+static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen)
 {
+	__be16 flags = guehdr->flags;
 	size_t len;
-	__be32 flags = guehdr->flags;
 
 	if (flags & ~GUE_FLAGS_ALL)
 		return 1;
@@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr,
 		/* Private flags are last four bytes accounted in
 		 * guehdr_flags_len
 		 */
-		flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV);
+		__be32 pflags = *(__be32 *)((void *)&guehdr[1] +
+					    len - GUE_LEN_PRIV);
 
-		if (flags & ~GUE_PFLAGS_ALL)
+		if (pflags & ~GUE_PFLAGS_ALL)
 			return 1;
 
-		len += guehdr_priv_flags_len(flags);
+		len += guehdr_priv_flags_len(pflags);
 		if (len > optlen)
 			return 1;
 	}
diff --git a/include/net/ip.h b/include/net/ip.h
index 9896f46cbbf1..af8addbaa3c1 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -34,6 +34,7 @@
 #include <net/flow_dissector.h>
 
 #define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
+#define IPV4_MIN_MTU		68		/* RFC 791 */
 
 struct sock;
 
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 65d0d25f2648..83a3e47d5845 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -71,6 +71,7 @@ struct Qdisc {
 				      * qdisc_tree_decrease_qlen() should stop.
 				      */
 #define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
+#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
 	u32			limit;
 	const struct Qdisc_ops	*ops;
 	struct qdisc_size_table	__rcu *stab;
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index f5024c560d8f..9c4eb33c5a1d 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
 
 #include <trace/define_trace.h>
 
-#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
 
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
-#define trace_preempt_enable(...)
-#define trace_preempt_disable(...)
 #define trace_irq_enable_rcuidle(...)
 #define trace_irq_disable_rcuidle(...)
+#endif
+
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
 #define trace_preempt_enable_rcuidle(...)
 #define trace_preempt_disable_rcuidle(...)
-
 #endif
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index af3cc2f4e1ad..37b5096ae97b 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -256,7 +256,6 @@ struct tc_red_qopt {
 #define TC_RED_ECN		1
 #define TC_RED_HARDDROP	2
 #define TC_RED_ADAPTATIVE	4
-#define TC_RED_OFFLOADED	8
 };
 
 struct tc_red_xstats {
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index d8b5f80c2ea6..843e29aa3cac 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -557,6 +557,7 @@ enum {
 	TCA_PAD,
 	TCA_DUMP_INVISIBLE,
 	TCA_CHAIN,
+	TCA_HW_OFFLOAD,
 	__TCA_MAX
 };
 
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index d41a07afe3fc..4b9eb064d7e7 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -155,6 +155,13 @@ struct tee_ioctl_buf_data {
  */
 #define TEE_IOCTL_PARAM_ATTR_TYPE_MASK		0xff
 
+/* Meta parameter carrying extra information about the message. */
+#define TEE_IOCTL_PARAM_ATTR_META		0x100
+
+/* Mask of all known attr bits */
+#define TEE_IOCTL_PARAM_ATTR_MASK \
+	(TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META)
+
 /*
  * Matches TEEC_LOGIN_* in GP TEE Client API
  * Are only defined for GP compliant TEEs