aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorWolfram Sang <wsa@the-dreams.de>2018-01-03 16:50:51 -0500
committerWolfram Sang <wsa@the-dreams.de>2018-01-03 16:50:51 -0500
commitfddfa22a4403cd19548de075ddada0c7c966a232 (patch)
treef981a63ff0c1062aecb5ab48b0585ffe97a7c20b /include/linux
parent639136d2a70ab9837befb22ad5b3d67cb4db2216 (diff)
parent0f30aca72c3b68f4b6a443193b574f14106cd61e (diff)
Merge tag 'at24-4.16-updates-for-wolfram' of git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux into i2c/for-4.16
"AT24 updates for 4.16 merge window The driver has been converted to using regmap instead of raw i2c and smbus calls which shrank the code significantly. Device tree binding document has been cleaned up. Device tree support in the driver has been improved and we now support all at24 models as well as two new DT properties (no-read-rollover and wp-gpios). We no longer use unreadable magic values for driver data as the way it was implemented caused problems for some EEPROM models - we switched to regular structs. Aside from that, there's a bunch of coding style fixes and minor improvements all over the place."
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/blk_types.h9
-rw-r--r--include/linux/blkdev.h25
-rw-r--r--include/linux/bpf_verifier.h4
-rw-r--r--include/linux/compiler.h47
-rw-r--r--include/linux/completion.h45
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/dma-mapping.h2
-rw-r--r--include/linux/gpio/driver.h33
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/iio/timer/stm32-lptim-trigger.h5
-rw-r--r--include/linux/intel-pti.h43
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/irq.h17
-rw-r--r--include/linux/irqdesc.h15
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/kmemcheck.h1
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/lockdep.h125
-rw-r--r--include/linux/mfd/rtsx_pci.h2
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/mlx5_ifc.h8
-rw-r--r--include/linux/oom.h9
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/platform_data/at24.h2
-rw-r--r--include/linux/pm.h1
-rw-r--r--include/linux/pti.h50
-rw-r--r--include/linux/ptr_ring.h9
-rw-r--r--include/linux/rbtree.h2
-rw-r--r--include/linux/rculist_nulls.h38
-rw-r--r--include/linux/rwlock_types.h3
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/sched/coredump.h1
-rw-r--r--include/linux/serdev.h2
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/spinlock.h5
-rw-r--r--include/linux/spinlock_types.h3
-rw-r--r--include/linux/string.h5
-rw-r--r--include/linux/sysfs.h6
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/tick.h1
-rw-r--r--include/linux/timer.h4
-rw-r--r--include/linux/trace.h2
-rw-r--r--include/linux/usb/usbnet.h1
49 files changed, 223 insertions, 359 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 82f0c8fd7be8..23d29b39f71e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
492 492
493#define bio_set_dev(bio, bdev) \ 493#define bio_set_dev(bio, bdev) \
494do { \ 494do { \
495 if ((bio)->bi_disk != (bdev)->bd_disk) \
496 bio_clear_flag(bio, BIO_THROTTLED);\
495 (bio)->bi_disk = (bdev)->bd_disk; \ 497 (bio)->bi_disk = (bdev)->bd_disk; \
496 (bio)->bi_partno = (bdev)->bd_partno; \ 498 (bio)->bi_partno = (bdev)->bd_partno; \
497} while (0) 499} while (0)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a1e628e032da..9e7d8bd776d2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -50,8 +50,6 @@ struct blk_issue_stat {
50struct bio { 50struct bio {
51 struct bio *bi_next; /* request queue link */ 51 struct bio *bi_next; /* request queue link */
52 struct gendisk *bi_disk; 52 struct gendisk *bi_disk;
53 u8 bi_partno;
54 blk_status_t bi_status;
55 unsigned int bi_opf; /* bottom bits req flags, 53 unsigned int bi_opf; /* bottom bits req flags,
56 * top bits REQ_OP. Use 54 * top bits REQ_OP. Use
57 * accessors. 55 * accessors.
@@ -59,8 +57,8 @@ struct bio {
59 unsigned short bi_flags; /* status, etc and bvec pool number */ 57 unsigned short bi_flags; /* status, etc and bvec pool number */
60 unsigned short bi_ioprio; 58 unsigned short bi_ioprio;
61 unsigned short bi_write_hint; 59 unsigned short bi_write_hint;
62 60 blk_status_t bi_status;
63 struct bvec_iter bi_iter; 61 u8 bi_partno;
64 62
65 /* Number of segments in this BIO after 63 /* Number of segments in this BIO after
66 * physical address coalescing is performed. 64 * physical address coalescing is performed.
@@ -74,8 +72,9 @@ struct bio {
74 unsigned int bi_seg_front_size; 72 unsigned int bi_seg_front_size;
75 unsigned int bi_seg_back_size; 73 unsigned int bi_seg_back_size;
76 74
77 atomic_t __bi_remaining; 75 struct bvec_iter bi_iter;
78 76
77 atomic_t __bi_remaining;
79 bio_end_io_t *bi_end_io; 78 bio_end_io_t *bi_end_io;
80 79
81 void *bi_private; 80 void *bi_private;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8089ca17db9a..0ce8a372d506 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
135struct request { 135struct request {
136 struct list_head queuelist; 136 struct list_head queuelist;
137 union { 137 union {
138 call_single_data_t csd; 138 struct __call_single_data csd;
139 u64 fifo_time; 139 u64 fifo_time;
140 }; 140 };
141 141
@@ -241,14 +241,24 @@ struct request {
241 struct request *next_rq; 241 struct request *next_rq;
242}; 242};
243 243
244static inline bool blk_op_is_scsi(unsigned int op)
245{
246 return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
247}
248
249static inline bool blk_op_is_private(unsigned int op)
250{
251 return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
252}
253
244static inline bool blk_rq_is_scsi(struct request *rq) 254static inline bool blk_rq_is_scsi(struct request *rq)
245{ 255{
246 return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; 256 return blk_op_is_scsi(req_op(rq));
247} 257}
248 258
249static inline bool blk_rq_is_private(struct request *rq) 259static inline bool blk_rq_is_private(struct request *rq)
250{ 260{
251 return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; 261 return blk_op_is_private(req_op(rq));
252} 262}
253 263
254static inline bool blk_rq_is_passthrough(struct request *rq) 264static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
256 return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); 266 return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
257} 267}
258 268
269static inline bool bio_is_passthrough(struct bio *bio)
270{
271 unsigned op = bio_op(bio);
272
273 return blk_op_is_scsi(op) || blk_op_is_private(op);
274}
275
259static inline unsigned short req_get_ioprio(struct request *req) 276static inline unsigned short req_get_ioprio(struct request *req)
260{ 277{
261 return req->ioprio; 278 return req->ioprio;
@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
948extern void blk_rq_unprep_clone(struct request *rq); 965extern void blk_rq_unprep_clone(struct request *rq);
949extern blk_status_t blk_insert_cloned_request(struct request_queue *q, 966extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
950 struct request *rq); 967 struct request *rq);
951extern int blk_rq_append_bio(struct request *rq, struct bio *bio); 968extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
952extern void blk_delay_queue(struct request_queue *, unsigned long); 969extern void blk_delay_queue(struct request_queue *, unsigned long);
953extern void blk_queue_split(struct request_queue *, struct bio **); 970extern void blk_queue_split(struct request_queue *, struct bio **);
954extern void blk_recount_segments(struct request_queue *, struct bio *); 971extern void blk_recount_segments(struct request_queue *, struct bio *);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c561b986bab0..1632bb13ad8a 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -15,11 +15,11 @@
15 * In practice this is far bigger than any realistic pointer offset; this limit 15 * In practice this is far bigger than any realistic pointer offset; this limit
16 * ensures that umax_value + (int)off + (int)size cannot overflow a u64. 16 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
17 */ 17 */
18#define BPF_MAX_VAR_OFF (1ULL << 31) 18#define BPF_MAX_VAR_OFF (1 << 29)
19/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures 19/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
20 * that converting umax_value to int cannot overflow. 20 * that converting umax_value to int cannot overflow.
21 */ 21 */
22#define BPF_MAX_VAR_SIZ INT_MAX 22#define BPF_MAX_VAR_SIZ (1 << 29)
23 23
24/* Liveness marks, used for registers and spilled-regs (in stack slots). 24/* Liveness marks, used for registers and spilled-regs (in stack slots).
25 * Read marks propagate upwards until they find a write mark; they record that 25 * Read marks propagate upwards until they find a write mark; they record that
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 188ed9f65517..52e611ab9a6c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
220/* 220/*
221 * Prevent the compiler from merging or refetching reads or writes. The 221 * Prevent the compiler from merging or refetching reads or writes. The
222 * compiler is also forbidden from reordering successive instances of 222 * compiler is also forbidden from reordering successive instances of
223 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 223 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
224 * compiler is aware of some particular ordering. One way to make the 224 * particular ordering. One way to make the compiler aware of ordering is to
225 * compiler aware of ordering is to put the two invocations of READ_ONCE, 225 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
226 * WRITE_ONCE or ACCESS_ONCE() in different C statements. 226 * statements.
227 * 227 *
228 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 228 * These two macros will also work on aggregate data types like structs or
229 * data types like structs or unions. If the size of the accessed data 229 * unions. If the size of the accessed data type exceeds the word size of
230 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 230 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
231 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at 231 * fall back to memcpy(). There's at least two memcpy()s: one for the
232 * least two memcpy()s: one for the __builtin_memcpy() and then one for 232 * __builtin_memcpy() and then one for the macro doing the copy of variable
233 * the macro doing the copy of variable - '__u' allocated on the stack. 233 * - '__u' allocated on the stack.
234 * 234 *
235 * Their two major use cases are: (1) Mediating communication between 235 * Their two major use cases are: (1) Mediating communication between
236 * process-level code and irq/NMI handlers, all running on the same CPU, 236 * process-level code and irq/NMI handlers, all running on the same CPU,
237 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise 237 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
238 * mutilate accesses that either do not require ordering or that interact 238 * mutilate accesses that either do not require ordering or that interact
239 * with an explicit memory barrier or atomic instruction that provides the 239 * with an explicit memory barrier or atomic instruction that provides the
240 * required ordering. 240 * required ordering.
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
327 compiletime_assert(__native_word(t), \ 327 compiletime_assert(__native_word(t), \
328 "Need native word sized stores/loads for atomicity.") 328 "Need native word sized stores/loads for atomicity.")
329 329
330/*
331 * Prevent the compiler from merging or refetching accesses. The compiler
332 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
333 * but only when the compiler is aware of some particular ordering. One way
334 * to make the compiler aware of ordering is to put the two invocations of
335 * ACCESS_ONCE() in different C statements.
336 *
337 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
338 * on a union member will work as long as the size of the member matches the
339 * size of the union and the size is smaller than word size.
340 *
341 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
342 * between process-level code and irq/NMI handlers, all running on the same CPU,
343 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
344 * mutilate accesses that either do not require ordering or that interact
345 * with an explicit memory barrier or atomic instruction that provides the
346 * required ordering.
347 *
348 * If possible use READ_ONCE()/WRITE_ONCE() instead.
349 */
350#define __ACCESS_ONCE(x) ({ \
351 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
352 (volatile typeof(x) *)&(x); })
353#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
354
355#endif /* __LINUX_COMPILER_H */ 330#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 0662a417febe..94a59ba7d422 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,9 +10,6 @@
10 */ 10 */
11 11
12#include <linux/wait.h> 12#include <linux/wait.h>
13#ifdef CONFIG_LOCKDEP_COMPLETIONS
14#include <linux/lockdep.h>
15#endif
16 13
17/* 14/*
18 * struct completion - structure used to maintain state for a "completion" 15 * struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,16 @@
29struct completion { 26struct completion {
30 unsigned int done; 27 unsigned int done;
31 wait_queue_head_t wait; 28 wait_queue_head_t wait;
32#ifdef CONFIG_LOCKDEP_COMPLETIONS
33 struct lockdep_map_cross map;
34#endif
35}; 29};
36 30
37#ifdef CONFIG_LOCKDEP_COMPLETIONS
38static inline void complete_acquire(struct completion *x)
39{
40 lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
41}
42
43static inline void complete_release(struct completion *x)
44{
45 lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
46}
47
48static inline void complete_release_commit(struct completion *x)
49{
50 lock_commit_crosslock((struct lockdep_map *)&x->map);
51}
52
53#define init_completion_map(x, m) \
54do { \
55 lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
56 (m)->name, (m)->key, 0); \
57 __init_completion(x); \
58} while (0)
59
60#define init_completion(x) \
61do { \
62 static struct lock_class_key __key; \
63 lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
64 "(completion)" #x, \
65 &__key, 0); \
66 __init_completion(x); \
67} while (0)
68#else
69#define init_completion_map(x, m) __init_completion(x) 31#define init_completion_map(x, m) __init_completion(x)
70#define init_completion(x) __init_completion(x) 32#define init_completion(x) __init_completion(x)
71static inline void complete_acquire(struct completion *x) {} 33static inline void complete_acquire(struct completion *x) {}
72static inline void complete_release(struct completion *x) {} 34static inline void complete_release(struct completion *x) {}
73static inline void complete_release_commit(struct completion *x) {} 35static inline void complete_release_commit(struct completion *x) {}
74#endif
75 36
76#ifdef CONFIG_LOCKDEP_COMPLETIONS
77#define COMPLETION_INITIALIZER(work) \
78 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
79 STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
80#else
81#define COMPLETION_INITIALIZER(work) \ 37#define COMPLETION_INITIALIZER(work) \
82 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } 38 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
83#endif
84 39
85#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ 40#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
86 (*({ init_completion_map(&(work), &(map)); &(work); })) 41 (*({ init_completion_map(&(work), &(map)); &(work); }))
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 201ab7267986..1a32e558eb11 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -86,7 +86,7 @@ enum cpuhp_state {
86 CPUHP_MM_ZSWP_POOL_PREPARE, 86 CPUHP_MM_ZSWP_POOL_PREPARE,
87 CPUHP_KVM_PPC_BOOK3S_PREPARE, 87 CPUHP_KVM_PPC_BOOK3S_PREPARE,
88 CPUHP_ZCOMP_PREPARE, 88 CPUHP_ZCOMP_PREPARE,
89 CPUHP_TIMERS_DEAD, 89 CPUHP_TIMERS_PREPARE,
90 CPUHP_MIPS_SOC_PREPARE, 90 CPUHP_MIPS_SOC_PREPARE,
91 CPUHP_BP_PREPARE_DYN, 91 CPUHP_BP_PREPARE_DYN,
92 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, 92 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 099058e1178b..631286535d0f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
83extern void set_groups(struct cred *, struct group_info *); 83extern void set_groups(struct cred *, struct group_info *);
84extern int groups_search(const struct group_info *, kgid_t); 84extern int groups_search(const struct group_info *, kgid_t);
85extern bool may_setgroups(void); 85extern bool may_setgroups(void);
86extern void groups_sort(struct group_info *);
86 87
87/* 88/*
88 * The security context of a task 89 * The security context of a task
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index f36ecc2a5712..3b0ba54cc4d5 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -216,6 +216,8 @@ static inline void debugfs_remove(struct dentry *dentry)
216static inline void debugfs_remove_recursive(struct dentry *dentry) 216static inline void debugfs_remove_recursive(struct dentry *dentry)
217{ } 217{ }
218 218
219const struct file_operations *debugfs_real_fops(const struct file *filp);
220
219static inline int debugfs_file_get(struct dentry *dentry) 221static inline int debugfs_file_get(struct dentry *dentry)
220{ 222{
221 return 0; 223 return 0;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e8f8e8fb244d..81ed9b2d84dc 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -704,7 +704,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
704 return ret; 704 return ret;
705} 705}
706 706
707#ifdef CONFIG_HAS_DMA
708static inline int dma_get_cache_alignment(void) 707static inline int dma_get_cache_alignment(void)
709{ 708{
710#ifdef ARCH_DMA_MINALIGN 709#ifdef ARCH_DMA_MINALIGN
@@ -712,7 +711,6 @@ static inline int dma_get_cache_alignment(void)
712#endif 711#endif
713 return 1; 712 return 1;
714} 713}
715#endif
716 714
717/* flags for the coherent memory api */ 715/* flags for the coherent memory api */
718#define DMA_MEMORY_EXCLUSIVE 0x01 716#define DMA_MEMORY_EXCLUSIVE 0x01
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 55e672592fa9..7258cd676df4 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -66,9 +66,10 @@ struct gpio_irq_chip {
66 /** 66 /**
67 * @lock_key: 67 * @lock_key:
68 * 68 *
69 * Per GPIO IRQ chip lockdep class. 69 * Per GPIO IRQ chip lockdep classes.
70 */ 70 */
71 struct lock_class_key *lock_key; 71 struct lock_class_key *lock_key;
72 struct lock_class_key *request_key;
72 73
73 /** 74 /**
74 * @parent_handler: 75 * @parent_handler:
@@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
323 324
324/* add/remove chips */ 325/* add/remove chips */
325extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, 326extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
326 struct lock_class_key *lock_key); 327 struct lock_class_key *lock_key,
328 struct lock_class_key *request_key);
327 329
328/** 330/**
329 * gpiochip_add_data() - register a gpio_chip 331 * gpiochip_add_data() - register a gpio_chip
@@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
350 */ 352 */
351#ifdef CONFIG_LOCKDEP 353#ifdef CONFIG_LOCKDEP
352#define gpiochip_add_data(chip, data) ({ \ 354#define gpiochip_add_data(chip, data) ({ \
353 static struct lock_class_key key; \ 355 static struct lock_class_key lock_key; \
354 gpiochip_add_data_with_key(chip, data, &key); \ 356 static struct lock_class_key request_key; \
357 gpiochip_add_data_with_key(chip, data, &lock_key, \
358 &request_key); \
355 }) 359 })
356#else 360#else
357#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL) 361#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
358#endif 362#endif
359 363
360static inline int gpiochip_add(struct gpio_chip *chip) 364static inline int gpiochip_add(struct gpio_chip *chip)
@@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
429 irq_flow_handler_t handler, 433 irq_flow_handler_t handler,
430 unsigned int type, 434 unsigned int type,
431 bool threaded, 435 bool threaded,
432 struct lock_class_key *lock_key); 436 struct lock_class_key *lock_key,
437 struct lock_class_key *request_key);
433 438
434#ifdef CONFIG_LOCKDEP 439#ifdef CONFIG_LOCKDEP
435 440
@@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
445 irq_flow_handler_t handler, 450 irq_flow_handler_t handler,
446 unsigned int type) 451 unsigned int type)
447{ 452{
448 static struct lock_class_key key; 453 static struct lock_class_key lock_key;
454 static struct lock_class_key request_key;
449 455
450 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 456 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
451 handler, type, false, &key); 457 handler, type, false,
458 &lock_key, &request_key);
452} 459}
453 460
454static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 461static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
458 unsigned int type) 465 unsigned int type)
459{ 466{
460 467
461 static struct lock_class_key key; 468 static struct lock_class_key lock_key;
469 static struct lock_class_key request_key;
462 470
463 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 471 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
464 handler, type, true, &key); 472 handler, type, true,
473 &lock_key, &request_key);
465} 474}
466#else 475#else
467static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, 476static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
@@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
471 unsigned int type) 480 unsigned int type)
472{ 481{
473 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 482 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
474 handler, type, false, NULL); 483 handler, type, false, NULL, NULL);
475} 484}
476 485
477static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 486static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
481 unsigned int type) 490 unsigned int type)
482{ 491{
483 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 492 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
484 handler, type, true, NULL); 493 handler, type, true, NULL, NULL);
485} 494}
486#endif /* CONFIG_LOCKDEP */ 495#endif /* CONFIG_LOCKDEP */
487 496
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f3e97c5f94c9..6c9336626592 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -708,6 +708,7 @@ struct vmbus_channel {
708 u8 monitor_bit; 708 u8 monitor_bit;
709 709
710 bool rescind; /* got rescind msg */ 710 bool rescind; /* got rescind msg */
711 struct completion rescind_event;
711 712
712 u32 ringbuffer_gpadlhandle; 713 u32 ringbuffer_gpadlhandle;
713 714
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 7c3a365f7e12..fa14f834e4ed 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
15#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18#include <linux/bug.h>
18 19
19struct idr { 20struct idr {
20 struct radix_tree_root idr_rt; 21 struct radix_tree_root idr_rt;
diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
index 34d59bfdce2d..464458d20b16 100644
--- a/include/linux/iio/timer/stm32-lptim-trigger.h
+++ b/include/linux/iio/timer/stm32-lptim-trigger.h
@@ -16,11 +16,14 @@
16#define LPTIM2_OUT "lptim2_out" 16#define LPTIM2_OUT "lptim2_out"
17#define LPTIM3_OUT "lptim3_out" 17#define LPTIM3_OUT "lptim3_out"
18 18
19#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) 19#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
20bool is_stm32_lptim_trigger(struct iio_trigger *trig); 20bool is_stm32_lptim_trigger(struct iio_trigger *trig);
21#else 21#else
22static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig) 22static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
23{ 23{
24#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
25 pr_warn_once("stm32 lptim_trigger not linked in\n");
26#endif
24 return false; 27 return false;
25} 28}
26#endif 29#endif
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h
new file mode 100644
index 000000000000..2710d72de3c9
--- /dev/null
+++ b/include/linux/intel-pti.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) Intel 2011
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 *
15 * The PTI (Parallel Trace Interface) driver directs trace data routed from
16 * various parts in the system out through the Intel Penwell PTI port and
17 * out of the mobile device for analysis with a debugging tool
18 * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
19 * compact JTAG, standard.
20 *
21 * This header file will allow other parts of the OS to use the
22 * interface to write out it's contents for debugging a mobile system.
23 */
24
25#ifndef LINUX_INTEL_PTI_H_
26#define LINUX_INTEL_PTI_H_
27
28/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
29#define PTI_LASTDWORD_DTS 0x30
30
31/* basic structure used as a write address to the PTI HW */
32struct pti_masterchannel {
33 u8 master;
34 u8 channel;
35};
36
37/* the following functions are defined in misc/pti.c */
38void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
39struct pti_masterchannel *pti_request_masterchannel(u8 type,
40 const char *thread_name);
41void pti_release_masterchannel(struct pti_masterchannel *mc);
42
43#endif /* LINUX_INTEL_PTI_H_ */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index cb18c6290ca8..8415bf1a9776 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -273,7 +273,8 @@ struct ipv6_pinfo {
273 * 100: prefer care-of address 273 * 100: prefer care-of address
274 */ 274 */
275 dontfrag:1, 275 dontfrag:1,
276 autoflowlabel:1; 276 autoflowlabel:1,
277 autoflowlabel_set:1;
277 __u8 min_hopcount; 278 __u8 min_hopcount;
278 __u8 tclass; 279 __u8 tclass;
279 __be32 rcv_flowinfo; 280 __be32 rcv_flowinfo;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e140f69163b6..a0231e96a578 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -212,6 +212,7 @@ struct irq_data {
212 * mask. Applies only to affinity managed irqs. 212 * mask. Applies only to affinity managed irqs.
213 * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target 213 * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
214 * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set 214 * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
215 * IRQD_CAN_RESERVE - Can use reservation mode
215 */ 216 */
216enum { 217enum {
217 IRQD_TRIGGER_MASK = 0xf, 218 IRQD_TRIGGER_MASK = 0xf,
@@ -233,6 +234,7 @@ enum {
233 IRQD_MANAGED_SHUTDOWN = (1 << 23), 234 IRQD_MANAGED_SHUTDOWN = (1 << 23),
234 IRQD_SINGLE_TARGET = (1 << 24), 235 IRQD_SINGLE_TARGET = (1 << 24),
235 IRQD_DEFAULT_TRIGGER_SET = (1 << 25), 236 IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
237 IRQD_CAN_RESERVE = (1 << 26),
236}; 238};
237 239
238#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) 240#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
377 return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; 379 return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
378} 380}
379 381
382static inline void irqd_set_can_reserve(struct irq_data *d)
383{
384 __irqd_to_state(d) |= IRQD_CAN_RESERVE;
385}
386
387static inline void irqd_clr_can_reserve(struct irq_data *d)
388{
389 __irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
390}
391
392static inline bool irqd_can_reserve(struct irq_data *d)
393{
394 return __irqd_to_state(d) & IRQD_CAN_RESERVE;
395}
396
380#undef __irqd_to_state 397#undef __irqd_to_state
381 398
382static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 399static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dd418955962b..25b33b664537 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -230,7 +230,7 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
230 data->chip = chip; 230 data->chip = chip;
231} 231}
232 232
233static inline int irq_balancing_disabled(unsigned int irq) 233static inline bool irq_balancing_disabled(unsigned int irq)
234{ 234{
235 struct irq_desc *desc; 235 struct irq_desc *desc;
236 236
@@ -238,7 +238,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
238 return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; 238 return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
239} 239}
240 240
241static inline int irq_is_percpu(unsigned int irq) 241static inline bool irq_is_percpu(unsigned int irq)
242{ 242{
243 struct irq_desc *desc; 243 struct irq_desc *desc;
244 244
@@ -246,7 +246,7 @@ static inline int irq_is_percpu(unsigned int irq)
246 return desc->status_use_accessors & IRQ_PER_CPU; 246 return desc->status_use_accessors & IRQ_PER_CPU;
247} 247}
248 248
249static inline int irq_is_percpu_devid(unsigned int irq) 249static inline bool irq_is_percpu_devid(unsigned int irq)
250{ 250{
251 struct irq_desc *desc; 251 struct irq_desc *desc;
252 252
@@ -255,12 +255,15 @@ static inline int irq_is_percpu_devid(unsigned int irq)
255} 255}
256 256
257static inline void 257static inline void
258irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) 258irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
259 struct lock_class_key *request_class)
259{ 260{
260 struct irq_desc *desc = irq_to_desc(irq); 261 struct irq_desc *desc = irq_to_desc(irq);
261 262
262 if (desc) 263 if (desc) {
263 lockdep_set_class(&desc->lock, class); 264 lockdep_set_class(&desc->lock, lock_class);
265 lockdep_set_class(&desc->request_mutex, request_class);
266 }
264} 267}
265 268
266#ifdef CONFIG_IRQ_PREFLOW_FASTEOI 269#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index a34355d19546..48c7e86bb556 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -113,7 +113,7 @@ struct irq_domain_ops {
113 unsigned int nr_irqs, void *arg); 113 unsigned int nr_irqs, void *arg);
114 void (*free)(struct irq_domain *d, unsigned int virq, 114 void (*free)(struct irq_domain *d, unsigned int virq,
115 unsigned int nr_irqs); 115 unsigned int nr_irqs);
116 int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early); 116 int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
117 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); 117 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
118 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, 118 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
119 unsigned long *out_hwirq, unsigned int *out_type); 119 unsigned long *out_hwirq, unsigned int *out_type);
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/include/linux/kmemcheck.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 893d6d606cd0..6bdd4b9f6611 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -232,7 +232,7 @@ struct kvm_vcpu {
232 struct mutex mutex; 232 struct mutex mutex;
233 struct kvm_run *run; 233 struct kvm_run *run;
234 234
235 int guest_fpu_loaded, guest_xcr0_loaded; 235 int guest_xcr0_loaded;
236 struct swait_queue_head wq; 236 struct swait_queue_head wq;
237 struct pid __rcu *pid; 237 struct pid __rcu *pid;
238 int sigset_active; 238 int sigset_active;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index a842551fe044..2e75dc34bff5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -158,12 +158,6 @@ struct lockdep_map {
158 int cpu; 158 int cpu;
159 unsigned long ip; 159 unsigned long ip;
160#endif 160#endif
161#ifdef CONFIG_LOCKDEP_CROSSRELEASE
162 /*
163 * Whether it's a crosslock.
164 */
165 int cross;
166#endif
167}; 161};
168 162
169static inline void lockdep_copy_map(struct lockdep_map *to, 163static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,96 +261,9 @@ struct held_lock {
267 unsigned int hardirqs_off:1; 261 unsigned int hardirqs_off:1;
268 unsigned int references:12; /* 32 bits */ 262 unsigned int references:12; /* 32 bits */
269 unsigned int pin_count; 263 unsigned int pin_count;
270#ifdef CONFIG_LOCKDEP_CROSSRELEASE
271 /*
272 * Generation id.
273 *
274 * A value of cross_gen_id will be stored when holding this,
275 * which is globally increased whenever each crosslock is held.
276 */
277 unsigned int gen_id;
278#endif
279};
280
281#ifdef CONFIG_LOCKDEP_CROSSRELEASE
282#define MAX_XHLOCK_TRACE_ENTRIES 5
283
284/*
285 * This is for keeping locks waiting for commit so that true dependencies
286 * can be added at commit step.
287 */
288struct hist_lock {
289 /*
290 * Id for each entry in the ring buffer. This is used to
291 * decide whether the ring buffer was overwritten or not.
292 *
293 * For example,
294 *
295 * |<----------- hist_lock ring buffer size ------->|
296 * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
297 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
298 *
299 * where 'p' represents an acquisition in process
300 * context, 'i' represents an acquisition in irq
301 * context.
302 *
303 * In this example, the ring buffer was overwritten by
304 * acquisitions in irq context, that should be detected on
305 * rollback or commit.
306 */
307 unsigned int hist_id;
308
309 /*
310 * Seperate stack_trace data. This will be used at commit step.
311 */
312 struct stack_trace trace;
313 unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
314
315 /*
316 * Seperate hlock instance. This will be used at commit step.
317 *
318 * TODO: Use a smaller data structure containing only necessary
319 * data. However, we should make lockdep code able to handle the
320 * smaller one first.
321 */
322 struct held_lock hlock;
323}; 264};
324 265
325/* 266/*
326 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
327 * be called instead of lockdep_init_map().
328 */
329struct cross_lock {
330 /*
331 * When more than one acquisition of crosslocks are overlapped,
332 * we have to perform commit for them based on cross_gen_id of
333 * the first acquisition, which allows us to add more true
334 * dependencies.
335 *
336 * Moreover, when no acquisition of a crosslock is in progress,
337 * we should not perform commit because the lock might not exist
338 * any more, which might cause incorrect memory access. So we
339 * have to track the number of acquisitions of a crosslock.
340 */
341 int nr_acquire;
342
343 /*
344 * Seperate hlock instance. This will be used at commit step.
345 *
346 * TODO: Use a smaller data structure containing only necessary
347 * data. However, we should make lockdep code able to handle the
348 * smaller one first.
349 */
350 struct held_lock hlock;
351};
352
353struct lockdep_map_cross {
354 struct lockdep_map map;
355 struct cross_lock xlock;
356};
357#endif
358
359/*
360 * Initialization, self-test and debugging-output methods: 267 * Initialization, self-test and debugging-output methods:
361 */ 268 */
362extern void lockdep_info(void); 269extern void lockdep_info(void);
@@ -560,37 +467,6 @@ enum xhlock_context_t {
560 XHLOCK_CTX_NR, 467 XHLOCK_CTX_NR,
561}; 468};
562 469
563#ifdef CONFIG_LOCKDEP_CROSSRELEASE
564extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
565 const char *name,
566 struct lock_class_key *key,
567 int subclass);
568extern void lock_commit_crosslock(struct lockdep_map *lock);
569
570/*
571 * What we essencially have to initialize is 'nr_acquire'. Other members
572 * will be initialized in add_xlock().
573 */
574#define STATIC_CROSS_LOCK_INIT() \
575 { .nr_acquire = 0,}
576
577#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
578 { .map.name = (_name), .map.key = (void *)(_key), \
579 .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
580
581/*
582 * To initialize a lockdep_map statically use this macro.
583 * Note that _name must not be NULL.
584 */
585#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
586 { .name = (_name), .key = (void *)(_key), .cross = 0, }
587
588extern void crossrelease_hist_start(enum xhlock_context_t c);
589extern void crossrelease_hist_end(enum xhlock_context_t c);
590extern void lockdep_invariant_state(bool force);
591extern void lockdep_init_task(struct task_struct *task);
592extern void lockdep_free_task(struct task_struct *task);
593#else /* !CROSSRELEASE */
594#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) 470#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
595/* 471/*
596 * To initialize a lockdep_map statically use this macro. 472 * To initialize a lockdep_map statically use this macro.
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
604static inline void lockdep_invariant_state(bool force) {} 480static inline void lockdep_invariant_state(bool force) {}
605static inline void lockdep_init_task(struct task_struct *task) {} 481static inline void lockdep_init_task(struct task_struct *task) {}
606static inline void lockdep_free_task(struct task_struct *task) {} 482static inline void lockdep_free_task(struct task_struct *task) {}
607#endif /* CROSSRELEASE */
608 483
609#ifdef CONFIG_LOCK_STAT 484#ifdef CONFIG_LOCK_STAT
610 485
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index a2a1318a3d0c..c3d3f04d8cc6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -915,10 +915,10 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
915#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6) 915#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
916 916
917enum dev_aspm_mode { 917enum dev_aspm_mode {
918 DEV_ASPM_DISABLE = 0,
919 DEV_ASPM_DYNAMIC, 918 DEV_ASPM_DYNAMIC,
920 DEV_ASPM_BACKDOOR, 919 DEV_ASPM_BACKDOOR,
921 DEV_ASPM_STATIC, 920 DEV_ASPM_STATIC,
921 DEV_ASPM_DISABLE,
922}; 922};
923 923
924/* 924/*
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a886b51511ab..1f509d072026 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -556,6 +556,7 @@ struct mlx5_core_sriov {
556}; 556};
557 557
558struct mlx5_irq_info { 558struct mlx5_irq_info {
559 cpumask_var_t mask;
559 char name[MLX5_MAX_IRQ_NAME]; 560 char name[MLX5_MAX_IRQ_NAME];
560}; 561};
561 562
@@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
1048 enum mlx5_eq_type type); 1049 enum mlx5_eq_type type);
1049int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 1050int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
1050int mlx5_start_eqs(struct mlx5_core_dev *dev); 1051int mlx5_start_eqs(struct mlx5_core_dev *dev);
1051int mlx5_stop_eqs(struct mlx5_core_dev *dev); 1052void mlx5_stop_eqs(struct mlx5_core_dev *dev);
1052int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, 1053int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
1053 unsigned int *irqn); 1054 unsigned int *irqn);
1054int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); 1055int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1164,6 +1165,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
1164int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); 1165int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
1165bool mlx5_lag_is_active(struct mlx5_core_dev *dev); 1166bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
1166struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); 1167struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
1168int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
1169 u64 *values,
1170 int num_counters,
1171 size_t *offsets);
1167struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); 1172struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
1168void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); 1173void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
1169 1174
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 38a7577a9ce7..d44ec5f41d4a 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -147,7 +147,7 @@ enum {
147 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, 147 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
148 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, 148 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
149 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, 149 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
150 MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, 150 MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
151 MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, 151 MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
152 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, 152 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
153 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, 153 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
@@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
7239 u8 vxlan_udp_port[0x10]; 7239 u8 vxlan_udp_port[0x10];
7240}; 7240};
7241 7241
7242struct mlx5_ifc_set_rate_limit_out_bits { 7242struct mlx5_ifc_set_pp_rate_limit_out_bits {
7243 u8 status[0x8]; 7243 u8 status[0x8];
7244 u8 reserved_at_8[0x18]; 7244 u8 reserved_at_8[0x18];
7245 7245
@@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
7248 u8 reserved_at_40[0x40]; 7248 u8 reserved_at_40[0x40];
7249}; 7249};
7250 7250
7251struct mlx5_ifc_set_rate_limit_in_bits { 7251struct mlx5_ifc_set_pp_rate_limit_in_bits {
7252 u8 opcode[0x10]; 7252 u8 opcode[0x10];
7253 u8 reserved_at_10[0x10]; 7253 u8 reserved_at_10[0x10];
7254 7254
@@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
7261 u8 reserved_at_60[0x20]; 7261 u8 reserved_at_60[0x20];
7262 7262
7263 u8 rate_limit[0x20]; 7263 u8 rate_limit[0x20];
7264
7265 u8 reserved_at_a0[0x160];
7264}; 7266};
7265 7267
7266struct mlx5_ifc_access_register_out_bits { 7268struct mlx5_ifc_access_register_out_bits {
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 01c91d874a57..5bad038ac012 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
67} 67}
68 68
69/* 69/*
70 * Use this helper if tsk->mm != mm and the victim mm needs a special
71 * handling. This is guaranteed to stay true after once set.
72 */
73static inline bool mm_is_oom_victim(struct mm_struct *mm)
74{
75 return test_bit(MMF_OOM_VICTIM, &mm->flags);
76}
77
78/*
70 * Checks whether a page fault on the given mm is still reliable. 79 * Checks whether a page fault on the given mm is still reliable.
71 * This is no longer true if the oom reaper started to reap the 80 * This is no longer true if the oom reaper started to reap the
72 * address space which is reflected by MMF_UNSTABLE flag set in 81 * address space which is reflected by MMF_UNSTABLE flag set in
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0403894147a3..c170c9250c8b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1674static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, 1674static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1675 unsigned int devfn) 1675 unsigned int devfn)
1676{ return NULL; } 1676{ return NULL; }
1677static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
1678 unsigned int bus, unsigned int devfn)
1679{ return NULL; }
1677 1680
1678static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1681static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1679static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 1682static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2c9c87d8a0c1..7546822a1d74 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -15,6 +15,7 @@
15#define _LINUX_PERF_EVENT_H 15#define _LINUX_PERF_EVENT_H
16 16
17#include <uapi/linux/perf_event.h> 17#include <uapi/linux/perf_event.h>
18#include <uapi/linux/bpf_perf_event.h>
18 19
19/* 20/*
20 * Kernel-internal data types and definitions: 21 * Kernel-internal data types and definitions:
@@ -787,7 +788,7 @@ struct perf_output_handle {
787}; 788};
788 789
789struct bpf_perf_event_data_kern { 790struct bpf_perf_event_data_kern {
790 struct pt_regs *regs; 791 bpf_user_pt_regs_t *regs;
791 struct perf_sample_data *data; 792 struct perf_sample_data *data;
792 struct perf_event *event; 793 struct perf_event *event;
793}; 794};
@@ -1177,6 +1178,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);
1177 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) 1178 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1178# define perf_instruction_pointer(regs) instruction_pointer(regs) 1179# define perf_instruction_pointer(regs) instruction_pointer(regs)
1179#endif 1180#endif
1181#ifndef perf_arch_bpf_user_pt_regs
1182# define perf_arch_bpf_user_pt_regs(regs) regs
1183#endif
1180 1184
1181static inline bool has_branch_stack(struct perf_event *event) 1185static inline bool has_branch_stack(struct perf_event *event)
1182{ 1186{
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index 271a4e25af67..63507ff464ee 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -50,6 +50,8 @@ struct at24_platform_data {
50#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ 50#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */
51#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ 51#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */
52#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ 52#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */
53#define AT24_FLAG_NO_RDROL BIT(1) /* does not auto-rollover reads to */
54 /* the next slave address */
53 55
54 void (*setup)(struct nvmem_device *nvmem, void *context); 56 void (*setup)(struct nvmem_device *nvmem, void *context);
55 void *context; 57 void *context;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 65d39115f06d..492ed473ba7e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev);
765extern int pm_generic_poweroff(struct device *dev); 765extern int pm_generic_poweroff(struct device *dev);
766extern void pm_generic_complete(struct device *dev); 766extern void pm_generic_complete(struct device *dev);
767 767
768extern void dev_pm_skip_next_resume_phases(struct device *dev);
768extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); 769extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
769 770
770#else /* !CONFIG_PM_SLEEP */ 771#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/pti.h b/include/linux/pti.h
index b3ea01a3197e..0174883a935a 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -1,43 +1,11 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) Intel 2011 2#ifndef _INCLUDE_PTI_H
3 * 3#define _INCLUDE_PTI_H
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 *
15 * The PTI (Parallel Trace Interface) driver directs trace data routed from
16 * various parts in the system out through the Intel Penwell PTI port and
17 * out of the mobile device for analysis with a debugging tool
18 * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
19 * compact JTAG, standard.
20 *
21 * This header file will allow other parts of the OS to use the
22 * interface to write out it's contents for debugging a mobile system.
23 */
24 4
25#ifndef PTI_H_ 5#ifdef CONFIG_PAGE_TABLE_ISOLATION
26#define PTI_H_ 6#include <asm/pti.h>
7#else
8static inline void pti_init(void) { }
9#endif
27 10
28/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ 11#endif
29#define PTI_LASTDWORD_DTS 0x30
30
31/* basic structure used as a write address to the PTI HW */
32struct pti_masterchannel {
33 u8 master;
34 u8 channel;
35};
36
37/* the following functions are defined in misc/pti.c */
38void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
39struct pti_masterchannel *pti_request_masterchannel(u8 type,
40 const char *thread_name);
41void pti_release_masterchannel(struct pti_masterchannel *mc);
42
43#endif /*PTI_H_*/
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 37b4bb2545b3..6866df4f31b5 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
101 101
102/* Note: callers invoking this in a loop must use a compiler barrier, 102/* Note: callers invoking this in a loop must use a compiler barrier,
103 * for example cpu_relax(). Callers must hold producer_lock. 103 * for example cpu_relax(). Callers must hold producer_lock.
104 * Callers are responsible for making sure pointer that is being queued
105 * points to a valid data.
104 */ 106 */
105static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) 107static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
106{ 108{
107 if (unlikely(!r->size) || r->queue[r->producer]) 109 if (unlikely(!r->size) || r->queue[r->producer])
108 return -ENOSPC; 110 return -ENOSPC;
109 111
112 /* Make sure the pointer we are storing points to a valid data. */
113 /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
114 smp_wmb();
115
110 r->queue[r->producer++] = ptr; 116 r->queue[r->producer++] = ptr;
111 if (unlikely(r->producer >= r->size)) 117 if (unlikely(r->producer >= r->size))
112 r->producer = 0; 118 r->producer = 0;
@@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
275 if (ptr) 281 if (ptr)
276 __ptr_ring_discard_one(r); 282 __ptr_ring_discard_one(r);
277 283
284 /* Make sure anyone accessing data through the pointer is up to date. */
285 /* Pairs with smp_wmb in __ptr_ring_produce. */
286 smp_read_barrier_depends();
278 return ptr; 287 return ptr;
279} 288}
280 289
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index d574361943ea..fcbeed4053ef 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
99 struct rb_root *root); 99 struct rb_root *root);
100extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, 100extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
101 struct rb_root *root); 101 struct rb_root *root);
102extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
103 struct rb_root_cached *root);
102 104
103static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, 105static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
104 struct rb_node **rb_link) 106 struct rb_node **rb_link)
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index a328e8181e49..e4b257ff881b 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -101,44 +101,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
101} 101}
102 102
103/** 103/**
104 * hlist_nulls_add_tail_rcu
105 * @n: the element to add to the hash list.
106 * @h: the list to add to.
107 *
108 * Description:
109 * Adds the specified element to the end of the specified hlist_nulls,
110 * while permitting racing traversals. NOTE: tail insertion requires
111 * list traversal.
112 *
113 * The caller must take whatever precautions are necessary
114 * (such as holding appropriate locks) to avoid racing
115 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
116 * or hlist_nulls_del_rcu(), running on this same list.
117 * However, it is perfectly legal to run concurrently with
118 * the _rcu list-traversal primitives, such as
119 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
120 * problems on Alpha CPUs. Regardless of the type of CPU, the
121 * list-traversal primitive must be guarded by rcu_read_lock().
122 */
123static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
124 struct hlist_nulls_head *h)
125{
126 struct hlist_nulls_node *i, *last = NULL;
127
128 for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
129 i = hlist_nulls_next_rcu(i))
130 last = i;
131
132 if (last) {
133 n->next = last->next;
134 n->pprev = &last->next;
135 rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
136 } else {
137 hlist_nulls_add_head_rcu(n, h);
138 }
139}
140
141/**
142 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 104 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
143 * @tpos: the type * to use as a loop cursor. 105 * @tpos: the type * to use as a loop cursor.
144 * @pos: the &struct hlist_nulls_node to use as a loop cursor. 106 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e93e36..857a72ceb794 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -10,9 +10,6 @@
10 */ 10 */
11typedef struct { 11typedef struct {
12 arch_rwlock_t raw_lock; 12 arch_rwlock_t raw_lock;
13#ifdef CONFIG_GENERIC_LOCKBREAK
14 unsigned int break_lock;
15#endif
16#ifdef CONFIG_DEBUG_SPINLOCK 13#ifdef CONFIG_DEBUG_SPINLOCK
17 unsigned int magic, owner_cpu; 14 unsigned int magic, owner_cpu;
18 void *owner; 15 void *owner;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21991d668d35..d2588263a989 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -849,17 +849,6 @@ struct task_struct {
849 struct held_lock held_locks[MAX_LOCK_DEPTH]; 849 struct held_lock held_locks[MAX_LOCK_DEPTH];
850#endif 850#endif
851 851
852#ifdef CONFIG_LOCKDEP_CROSSRELEASE
853#define MAX_XHLOCKS_NR 64UL
854 struct hist_lock *xhlocks; /* Crossrelease history locks */
855 unsigned int xhlock_idx;
856 /* For restoring at history boundaries */
857 unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
858 unsigned int hist_id;
859 /* For overwrite check at each context exit */
860 unsigned int hist_id_save[XHLOCK_CTX_NR];
861#endif
862
863#ifdef CONFIG_UBSAN 852#ifdef CONFIG_UBSAN
864 unsigned int in_ubsan; 853 unsigned int in_ubsan;
865#endif 854#endif
@@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
1503 __set_task_comm(tsk, from, false); 1492 __set_task_comm(tsk, from, false);
1504} 1493}
1505 1494
1506extern char *get_task_comm(char *to, struct task_struct *tsk); 1495extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1496#define get_task_comm(buf, tsk) ({ \
1497 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1498 __get_task_comm(buf, sizeof(buf), tsk); \
1499})
1507 1500
1508#ifdef CONFIG_SMP 1501#ifdef CONFIG_SMP
1509void scheduler_ipi(void); 1502void scheduler_ipi(void);
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 9c8847395b5e..ec912d01126f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
70#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ 70#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ 71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ 72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
73#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
73#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) 74#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
74 75
75#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ 76#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index e69402d4a8ae..d609e6dc5bad 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -184,7 +184,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
184 struct serdev_device *serdev = ctrl->serdev; 184 struct serdev_device *serdev = ctrl->serdev;
185 185
186 if (!serdev || !serdev->ops->receive_buf) 186 if (!serdev || !serdev->ops->receive_buf)
187 return -EINVAL; 187 return 0;
188 188
189 return serdev->ops->receive_buf(serdev, data, count); 189 return serdev->ops->receive_buf(serdev, data, count);
190} 190}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bc486ef23f20..a38c80e9f91e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1406,8 +1406,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
1406} 1406}
1407 1407
1408/* 1408/*
1409 * If users == 1, we are the only owner and are can avoid redundant 1409 * If users == 1, we are the only owner and can avoid redundant atomic changes.
1410 * atomic change.
1411 */ 1410 */
1412 1411
1413/** 1412/**
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7b2170bfd6e7..bc6bb325d1bf 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
126 * for that name. This appears in the sysfs "modalias" attribute 126 * for that name. This appears in the sysfs "modalias" attribute
127 * for driver coldplugging, and in uevents used for hotplugging 127 * for driver coldplugging, and in uevents used for hotplugging
128 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when 128 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
129 * when not using a GPIO line) 129 * not using a GPIO line)
130 * 130 *
131 * @statistics: statistics for the spi_device 131 * @statistics: statistics for the spi_device
132 * 132 *
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a39186194cd6..3bf273538840 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -107,16 +107,11 @@ do { \
107 107
108#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) 108#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
109 109
110#ifdef CONFIG_GENERIC_LOCKBREAK
111#define raw_spin_is_contended(lock) ((lock)->break_lock)
112#else
113
114#ifdef arch_spin_is_contended 110#ifdef arch_spin_is_contended
115#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) 111#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
116#else 112#else
117#define raw_spin_is_contended(lock) (((void)(lock), 0)) 113#define raw_spin_is_contended(lock) (((void)(lock), 0))
118#endif /*arch_spin_is_contended*/ 114#endif /*arch_spin_is_contended*/
119#endif
120 115
121/* 116/*
122 * This barrier must provide two things: 117 * This barrier must provide two things:
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb13a5d..24b4e6f2c1a2 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -19,9 +19,6 @@
19 19
20typedef struct raw_spinlock { 20typedef struct raw_spinlock {
21 arch_spinlock_t raw_lock; 21 arch_spinlock_t raw_lock;
22#ifdef CONFIG_GENERIC_LOCKBREAK
23 unsigned int break_lock;
24#endif
25#ifdef CONFIG_DEBUG_SPINLOCK 22#ifdef CONFIG_DEBUG_SPINLOCK
26 unsigned int magic, owner_cpu; 23 unsigned int magic, owner_cpu;
27 void *owner; 24 void *owner;
diff --git a/include/linux/string.h b/include/linux/string.h
index 410ecf17de3c..cfd83eb2f926 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
259{ 259{
260 __kernel_size_t ret; 260 __kernel_size_t ret;
261 size_t p_size = __builtin_object_size(p, 0); 261 size_t p_size = __builtin_object_size(p, 0);
262 if (p_size == (size_t)-1) 262
263 /* Work around gcc excess stack consumption issue */
264 if (p_size == (size_t)-1 ||
265 (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
263 return __builtin_strlen(p); 266 return __builtin_strlen(p);
264 ret = strnlen(p, p_size); 267 ret = strnlen(p, p_size);
265 if (p_size <= ret) 268 if (p_size <= ret)
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index e32dfe098e82..40839c02d28c 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -117,6 +117,12 @@ struct attribute_group {
117 .show = _name##_show, \ 117 .show = _name##_show, \
118} 118}
119 119
120#define __ATTR_RO_MODE(_name, _mode) { \
121 .attr = { .name = __stringify(_name), \
122 .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
123 .show = _name##_show, \
124}
125
120#define __ATTR_WO(_name) { \ 126#define __ATTR_WO(_name) { \
121 .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ 127 .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
122 .store = _name##_store, \ 128 .store = _name##_store, \
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index df5d97a85e1a..ca4a6361389b 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -224,7 +224,8 @@ struct tcp_sock {
224 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ 224 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
225 fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ 225 fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
226 fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ 226 fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
227 unused:3; 227 is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
228 unused:2;
228 u8 nonagle : 4,/* Disable Nagle algorithm? */ 229 u8 nonagle : 4,/* Disable Nagle algorithm? */
229 thin_lto : 1,/* Use linear timeouts for thin streams */ 230 thin_lto : 1,/* Use linear timeouts for thin streams */
230 unused1 : 1, 231 unused1 : 1,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f442d1a42025..7cc35921218e 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void);
119extern void tick_nohz_irq_exit(void); 119extern void tick_nohz_irq_exit(void);
120extern ktime_t tick_nohz_get_sleep_length(void); 120extern ktime_t tick_nohz_get_sleep_length(void);
121extern unsigned long tick_nohz_get_idle_calls(void); 121extern unsigned long tick_nohz_get_idle_calls(void);
122extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
122extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 123extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
123extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); 124extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
124#else /* !CONFIG_NO_HZ_COMMON */ 125#else /* !CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 04af640ea95b..2448f9cc48a3 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j);
207unsigned long round_jiffies_up_relative(unsigned long j); 207unsigned long round_jiffies_up_relative(unsigned long j);
208 208
209#ifdef CONFIG_HOTPLUG_CPU 209#ifdef CONFIG_HOTPLUG_CPU
210int timers_prepare_cpu(unsigned int cpu);
210int timers_dead_cpu(unsigned int cpu); 211int timers_dead_cpu(unsigned int cpu);
211#else 212#else
212#define timers_dead_cpu NULL 213#define timers_prepare_cpu NULL
214#define timers_dead_cpu NULL
213#endif 215#endif
214 216
215#endif 217#endif
diff --git a/include/linux/trace.h b/include/linux/trace.h
index d24991c1fef3..b95ffb2188ab 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -18,7 +18,7 @@
18 */ 18 */
19struct trace_export { 19struct trace_export {
20 struct trace_export __rcu *next; 20 struct trace_export __rcu *next;
21 void (*write)(const void *, unsigned int); 21 void (*write)(struct trace_export *, const void *, unsigned int);
22}; 22};
23 23
24int register_ftrace_export(struct trace_export *export); 24int register_ftrace_export(struct trace_export *export);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index a69877734c4e..e2ec3582e549 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -82,6 +82,7 @@ struct usbnet {
82# define EVENT_RX_KILL 10 82# define EVENT_RX_KILL 10
83# define EVENT_LINK_CHANGE 11 83# define EVENT_LINK_CHANGE 11
84# define EVENT_SET_RX_MODE 12 84# define EVENT_SET_RX_MODE 12
85# define EVENT_NO_IP_ALIGN 13
85}; 86};
86 87
87static inline struct usb_driver *driver_of(struct usb_interface *intf) 88static inline struct usb_driver *driver_of(struct usb_interface *intf)