diff options
| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-01-02 08:46:35 -0500 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-01-02 08:46:35 -0500 |
| commit | b6a09416e83ffe4eccfb4ef1b91b3b66483fa810 (patch) | |
| tree | b30f266e85047244dcdb47d5afc134e76aec530d /include | |
| parent | db809859c8cee415293b830e67178f526d1eb2be (diff) | |
| parent | 30a7acd573899fd8b8ac39236eff6468b195ac7d (diff) | |
Merge 4.15-rc6 into char-misc-next
We want the fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include')
57 files changed, 315 insertions, 398 deletions
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h index ea189d88a3cc..8ac4e68a12f0 100644 --- a/include/asm-generic/mm_hooks.h +++ b/include/asm-generic/mm_hooks.h | |||
| @@ -7,9 +7,10 @@ | |||
| 7 | #ifndef _ASM_GENERIC_MM_HOOKS_H | 7 | #ifndef _ASM_GENERIC_MM_HOOKS_H |
| 8 | #define _ASM_GENERIC_MM_HOOKS_H | 8 | #define _ASM_GENERIC_MM_HOOKS_H |
| 9 | 9 | ||
| 10 | static inline void arch_dup_mmap(struct mm_struct *oldmm, | 10 | static inline int arch_dup_mmap(struct mm_struct *oldmm, |
| 11 | struct mm_struct *mm) | 11 | struct mm_struct *mm) |
| 12 | { | 12 | { |
| 13 | return 0; | ||
| 13 | } | 14 | } |
| 14 | 15 | ||
| 15 | static inline void arch_exit_mmap(struct mm_struct *mm) | 16 | static inline void arch_exit_mmap(struct mm_struct *mm) |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index b234d54f2cb6..868e68561f91 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
| @@ -1025,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd) | |||
| 1025 | struct file; | 1025 | struct file; |
| 1026 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | 1026 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
| 1027 | unsigned long size, pgprot_t *vma_prot); | 1027 | unsigned long size, pgprot_t *vma_prot); |
| 1028 | |||
| 1029 | #ifndef CONFIG_X86_ESPFIX64 | ||
| 1030 | static inline void init_espfix_bsp(void) { } | ||
| 1031 | #endif | ||
| 1032 | |||
| 1028 | #endif /* !__ASSEMBLY__ */ | 1033 | #endif /* !__ASSEMBLY__ */ |
| 1029 | 1034 | ||
| 1030 | #ifndef io_remap_pfn_range | 1035 | #ifndef io_remap_pfn_range |
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index f0b44c16e88f..c2bae8da642c 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h | |||
| @@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl, | |||
| 82 | struct ahash_instance *inst); | 82 | struct ahash_instance *inst); |
| 83 | void ahash_free_instance(struct crypto_instance *inst); | 83 | void ahash_free_instance(struct crypto_instance *inst); |
| 84 | 84 | ||
| 85 | int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, | ||
| 86 | unsigned int keylen); | ||
| 87 | |||
| 88 | static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) | ||
| 89 | { | ||
| 90 | return alg->setkey != shash_no_setkey; | ||
| 91 | } | ||
| 92 | |||
| 85 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, | 93 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, |
| 86 | struct hash_alg_common *alg, | 94 | struct hash_alg_common *alg, |
| 87 | struct crypto_instance *inst); | 95 | struct crypto_instance *inst); |
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h index cceafa01f907..b67404fc4b34 100644 --- a/include/crypto/mcryptd.h +++ b/include/crypto/mcryptd.h | |||
| @@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast( | |||
| 27 | 27 | ||
| 28 | struct mcryptd_cpu_queue { | 28 | struct mcryptd_cpu_queue { |
| 29 | struct crypto_queue queue; | 29 | struct crypto_queue queue; |
| 30 | spinlock_t q_lock; | ||
| 30 | struct work_struct work; | 31 | struct work_struct work; |
| 31 | }; | 32 | }; |
| 32 | 33 | ||
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index a4649c56ca2f..5971577016a2 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #define __DRM_CONNECTOR_H__ | 24 | #define __DRM_CONNECTOR_H__ |
| 25 | 25 | ||
| 26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
| 27 | #include <linux/llist.h> | ||
| 27 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
| 28 | #include <linux/hdmi.h> | 29 | #include <linux/hdmi.h> |
| 29 | #include <drm/drm_mode_object.h> | 30 | #include <drm/drm_mode_object.h> |
| @@ -918,12 +919,13 @@ struct drm_connector { | |||
| 918 | uint16_t tile_h_size, tile_v_size; | 919 | uint16_t tile_h_size, tile_v_size; |
| 919 | 920 | ||
| 920 | /** | 921 | /** |
| 921 | * @free_work: | 922 | * @free_node: |
| 922 | * | 923 | * |
| 923 | * Work used only by &drm_connector_iter to be able to clean up a | 924 | * List used only by &drm_connector_iter to be able to clean up a |
| 924 | * connector from any context. | 925 | * connector from any context, in conjunction with |
| 926 | * &drm_mode_config.connector_free_work. | ||
| 925 | */ | 927 | */ |
| 926 | struct work_struct free_work; | 928 | struct llist_node free_node; |
| 927 | }; | 929 | }; |
| 928 | 930 | ||
| 929 | #define obj_to_connector(x) container_of(x, struct drm_connector, base) | 931 | #define obj_to_connector(x) container_of(x, struct drm_connector, base) |
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 2ec41d032e56..efe6d5a8e834 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h | |||
| @@ -465,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector, | |||
| 465 | struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, | 465 | struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, |
| 466 | struct i2c_adapter *adapter); | 466 | struct i2c_adapter *adapter); |
| 467 | struct edid *drm_edid_duplicate(const struct edid *edid); | 467 | struct edid *drm_edid_duplicate(const struct edid *edid); |
| 468 | void drm_reset_display_info(struct drm_connector *connector); | ||
| 469 | u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid); | ||
| 468 | int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); | 470 | int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
| 469 | 471 | ||
| 470 | u8 drm_match_cea_mode(const struct drm_display_mode *to_match); | 472 | u8 drm_match_cea_mode(const struct drm_display_mode *to_match); |
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index b21e827c5c78..b0ce26d71296 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
| 28 | #include <linux/idr.h> | 28 | #include <linux/idr.h> |
| 29 | #include <linux/workqueue.h> | 29 | #include <linux/workqueue.h> |
| 30 | #include <linux/llist.h> | ||
| 30 | 31 | ||
| 31 | #include <drm/drm_modeset_lock.h> | 32 | #include <drm/drm_modeset_lock.h> |
| 32 | 33 | ||
| @@ -393,7 +394,7 @@ struct drm_mode_config { | |||
| 393 | 394 | ||
| 394 | /** | 395 | /** |
| 395 | * @connector_list_lock: Protects @num_connector and | 396 | * @connector_list_lock: Protects @num_connector and |
| 396 | * @connector_list. | 397 | * @connector_list and @connector_free_list. |
| 397 | */ | 398 | */ |
| 398 | spinlock_t connector_list_lock; | 399 | spinlock_t connector_list_lock; |
| 399 | /** | 400 | /** |
| @@ -414,6 +415,21 @@ struct drm_mode_config { | |||
| 414 | */ | 415 | */ |
| 415 | struct list_head connector_list; | 416 | struct list_head connector_list; |
| 416 | /** | 417 | /** |
| 418 | * @connector_free_list: | ||
| 419 | * | ||
| 420 | * List of connector objects linked with &drm_connector.free_node. | ||
| 421 | * Protected by @connector_list_lock. Used by | ||
| 422 | * drm_for_each_connector_iter() and | ||
| 423 | * &struct drm_connector_list_iter to safely free connectors using | ||
| 424 | * @connector_free_work. | ||
| 425 | */ | ||
| 426 | struct llist_head connector_free_list; | ||
| 427 | /** | ||
| 428 | * @connector_free_work: Work to clean up @connector_free_list. | ||
| 429 | */ | ||
| 430 | struct work_struct connector_free_work; | ||
| 431 | |||
| 432 | /** | ||
| 417 | * @num_encoder: | 433 | * @num_encoder: |
| 418 | * | 434 | * |
| 419 | * Number of encoders on this device. This is invariant over the | 435 | * Number of encoders on this device. This is invariant over the |
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 6e45608b2399..9da6ce22803f 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h | |||
| @@ -62,7 +62,7 @@ struct arch_timer_cpu { | |||
| 62 | bool enabled; | 62 | bool enabled; |
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | int kvm_timer_hyp_init(void); | 65 | int kvm_timer_hyp_init(bool); |
| 66 | int kvm_timer_enable(struct kvm_vcpu *vcpu); | 66 | int kvm_timer_enable(struct kvm_vcpu *vcpu); |
| 67 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); | 67 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); |
| 68 | void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); | 68 | void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 82f0c8fd7be8..23d29b39f71e 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx); | |||
| 492 | 492 | ||
| 493 | #define bio_set_dev(bio, bdev) \ | 493 | #define bio_set_dev(bio, bdev) \ |
| 494 | do { \ | 494 | do { \ |
| 495 | if ((bio)->bi_disk != (bdev)->bd_disk) \ | ||
| 496 | bio_clear_flag(bio, BIO_THROTTLED);\ | ||
| 495 | (bio)->bi_disk = (bdev)->bd_disk; \ | 497 | (bio)->bi_disk = (bdev)->bd_disk; \ |
| 496 | (bio)->bi_partno = (bdev)->bd_partno; \ | 498 | (bio)->bi_partno = (bdev)->bd_partno; \ |
| 497 | } while (0) | 499 | } while (0) |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a1e628e032da..9e7d8bd776d2 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -50,8 +50,6 @@ struct blk_issue_stat { | |||
| 50 | struct bio { | 50 | struct bio { |
| 51 | struct bio *bi_next; /* request queue link */ | 51 | struct bio *bi_next; /* request queue link */ |
| 52 | struct gendisk *bi_disk; | 52 | struct gendisk *bi_disk; |
| 53 | u8 bi_partno; | ||
| 54 | blk_status_t bi_status; | ||
| 55 | unsigned int bi_opf; /* bottom bits req flags, | 53 | unsigned int bi_opf; /* bottom bits req flags, |
| 56 | * top bits REQ_OP. Use | 54 | * top bits REQ_OP. Use |
| 57 | * accessors. | 55 | * accessors. |
| @@ -59,8 +57,8 @@ struct bio { | |||
| 59 | unsigned short bi_flags; /* status, etc and bvec pool number */ | 57 | unsigned short bi_flags; /* status, etc and bvec pool number */ |
| 60 | unsigned short bi_ioprio; | 58 | unsigned short bi_ioprio; |
| 61 | unsigned short bi_write_hint; | 59 | unsigned short bi_write_hint; |
| 62 | 60 | blk_status_t bi_status; | |
| 63 | struct bvec_iter bi_iter; | 61 | u8 bi_partno; |
| 64 | 62 | ||
| 65 | /* Number of segments in this BIO after | 63 | /* Number of segments in this BIO after |
| 66 | * physical address coalescing is performed. | 64 | * physical address coalescing is performed. |
| @@ -74,8 +72,9 @@ struct bio { | |||
| 74 | unsigned int bi_seg_front_size; | 72 | unsigned int bi_seg_front_size; |
| 75 | unsigned int bi_seg_back_size; | 73 | unsigned int bi_seg_back_size; |
| 76 | 74 | ||
| 77 | atomic_t __bi_remaining; | 75 | struct bvec_iter bi_iter; |
| 78 | 76 | ||
| 77 | atomic_t __bi_remaining; | ||
| 79 | bio_end_io_t *bi_end_io; | 78 | bio_end_io_t *bi_end_io; |
| 80 | 79 | ||
| 81 | void *bi_private; | 80 | void *bi_private; |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8089ca17db9a..0ce8a372d506 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t; | |||
| 135 | struct request { | 135 | struct request { |
| 136 | struct list_head queuelist; | 136 | struct list_head queuelist; |
| 137 | union { | 137 | union { |
| 138 | call_single_data_t csd; | 138 | struct __call_single_data csd; |
| 139 | u64 fifo_time; | 139 | u64 fifo_time; |
| 140 | }; | 140 | }; |
| 141 | 141 | ||
| @@ -241,14 +241,24 @@ struct request { | |||
| 241 | struct request *next_rq; | 241 | struct request *next_rq; |
| 242 | }; | 242 | }; |
| 243 | 243 | ||
| 244 | static inline bool blk_op_is_scsi(unsigned int op) | ||
| 245 | { | ||
| 246 | return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; | ||
| 247 | } | ||
| 248 | |||
| 249 | static inline bool blk_op_is_private(unsigned int op) | ||
| 250 | { | ||
| 251 | return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; | ||
| 252 | } | ||
| 253 | |||
| 244 | static inline bool blk_rq_is_scsi(struct request *rq) | 254 | static inline bool blk_rq_is_scsi(struct request *rq) |
| 245 | { | 255 | { |
| 246 | return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; | 256 | return blk_op_is_scsi(req_op(rq)); |
| 247 | } | 257 | } |
| 248 | 258 | ||
| 249 | static inline bool blk_rq_is_private(struct request *rq) | 259 | static inline bool blk_rq_is_private(struct request *rq) |
| 250 | { | 260 | { |
| 251 | return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; | 261 | return blk_op_is_private(req_op(rq)); |
| 252 | } | 262 | } |
| 253 | 263 | ||
| 254 | static inline bool blk_rq_is_passthrough(struct request *rq) | 264 | static inline bool blk_rq_is_passthrough(struct request *rq) |
| @@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq) | |||
| 256 | return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); | 266 | return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); |
| 257 | } | 267 | } |
| 258 | 268 | ||
| 269 | static inline bool bio_is_passthrough(struct bio *bio) | ||
| 270 | { | ||
| 271 | unsigned op = bio_op(bio); | ||
| 272 | |||
| 273 | return blk_op_is_scsi(op) || blk_op_is_private(op); | ||
| 274 | } | ||
| 275 | |||
| 259 | static inline unsigned short req_get_ioprio(struct request *req) | 276 | static inline unsigned short req_get_ioprio(struct request *req) |
| 260 | { | 277 | { |
| 261 | return req->ioprio; | 278 | return req->ioprio; |
| @@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | |||
| 948 | extern void blk_rq_unprep_clone(struct request *rq); | 965 | extern void blk_rq_unprep_clone(struct request *rq); |
| 949 | extern blk_status_t blk_insert_cloned_request(struct request_queue *q, | 966 | extern blk_status_t blk_insert_cloned_request(struct request_queue *q, |
| 950 | struct request *rq); | 967 | struct request *rq); |
| 951 | extern int blk_rq_append_bio(struct request *rq, struct bio *bio); | 968 | extern int blk_rq_append_bio(struct request *rq, struct bio **bio); |
| 952 | extern void blk_delay_queue(struct request_queue *, unsigned long); | 969 | extern void blk_delay_queue(struct request_queue *, unsigned long); |
| 953 | extern void blk_queue_split(struct request_queue *, struct bio **); | 970 | extern void blk_queue_split(struct request_queue *, struct bio **); |
| 954 | extern void blk_recount_segments(struct request_queue *, struct bio *); | 971 | extern void blk_recount_segments(struct request_queue *, struct bio *); |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c561b986bab0..1632bb13ad8a 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
| @@ -15,11 +15,11 @@ | |||
| 15 | * In practice this is far bigger than any realistic pointer offset; this limit | 15 | * In practice this is far bigger than any realistic pointer offset; this limit |
| 16 | * ensures that umax_value + (int)off + (int)size cannot overflow a u64. | 16 | * ensures that umax_value + (int)off + (int)size cannot overflow a u64. |
| 17 | */ | 17 | */ |
| 18 | #define BPF_MAX_VAR_OFF (1ULL << 31) | 18 | #define BPF_MAX_VAR_OFF (1 << 29) |
| 19 | /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures | 19 | /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures |
| 20 | * that converting umax_value to int cannot overflow. | 20 | * that converting umax_value to int cannot overflow. |
| 21 | */ | 21 | */ |
| 22 | #define BPF_MAX_VAR_SIZ INT_MAX | 22 | #define BPF_MAX_VAR_SIZ (1 << 29) |
| 23 | 23 | ||
| 24 | /* Liveness marks, used for registers and spilled-regs (in stack slots). | 24 | /* Liveness marks, used for registers and spilled-regs (in stack slots). |
| 25 | * Read marks propagate upwards until they find a write mark; they record that | 25 | * Read marks propagate upwards until they find a write mark; they record that |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 188ed9f65517..52e611ab9a6c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
| 220 | /* | 220 | /* |
| 221 | * Prevent the compiler from merging or refetching reads or writes. The | 221 | * Prevent the compiler from merging or refetching reads or writes. The |
| 222 | * compiler is also forbidden from reordering successive instances of | 222 | * compiler is also forbidden from reordering successive instances of |
| 223 | * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the | 223 | * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some |
| 224 | * compiler is aware of some particular ordering. One way to make the | 224 | * particular ordering. One way to make the compiler aware of ordering is to |
| 225 | * compiler aware of ordering is to put the two invocations of READ_ONCE, | 225 | * put the two invocations of READ_ONCE or WRITE_ONCE in different C |
| 226 | * WRITE_ONCE or ACCESS_ONCE() in different C statements. | 226 | * statements. |
| 227 | * | 227 | * |
| 228 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate | 228 | * These two macros will also work on aggregate data types like structs or |
| 229 | * data types like structs or unions. If the size of the accessed data | 229 | * unions. If the size of the accessed data type exceeds the word size of |
| 230 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) | 230 | * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will |
| 231 | * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at | 231 | * fall back to memcpy(). There's at least two memcpy()s: one for the |
| 232 | * least two memcpy()s: one for the __builtin_memcpy() and then one for | 232 | * __builtin_memcpy() and then one for the macro doing the copy of variable |
| 233 | * the macro doing the copy of variable - '__u' allocated on the stack. | 233 | * - '__u' allocated on the stack. |
| 234 | * | 234 | * |
| 235 | * Their two major use cases are: (1) Mediating communication between | 235 | * Their two major use cases are: (1) Mediating communication between |
| 236 | * process-level code and irq/NMI handlers, all running on the same CPU, | 236 | * process-level code and irq/NMI handlers, all running on the same CPU, |
| 237 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | 237 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise |
| 238 | * mutilate accesses that either do not require ordering or that interact | 238 | * mutilate accesses that either do not require ordering or that interact |
| 239 | * with an explicit memory barrier or atomic instruction that provides the | 239 | * with an explicit memory barrier or atomic instruction that provides the |
| 240 | * required ordering. | 240 | * required ordering. |
| @@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
| 327 | compiletime_assert(__native_word(t), \ | 327 | compiletime_assert(__native_word(t), \ |
| 328 | "Need native word sized stores/loads for atomicity.") | 328 | "Need native word sized stores/loads for atomicity.") |
| 329 | 329 | ||
| 330 | /* | ||
| 331 | * Prevent the compiler from merging or refetching accesses. The compiler | ||
| 332 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), | ||
| 333 | * but only when the compiler is aware of some particular ordering. One way | ||
| 334 | * to make the compiler aware of ordering is to put the two invocations of | ||
| 335 | * ACCESS_ONCE() in different C statements. | ||
| 336 | * | ||
| 337 | * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE | ||
| 338 | * on a union member will work as long as the size of the member matches the | ||
| 339 | * size of the union and the size is smaller than word size. | ||
| 340 | * | ||
| 341 | * The major use cases of ACCESS_ONCE used to be (1) Mediating communication | ||
| 342 | * between process-level code and irq/NMI handlers, all running on the same CPU, | ||
| 343 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | ||
| 344 | * mutilate accesses that either do not require ordering or that interact | ||
| 345 | * with an explicit memory barrier or atomic instruction that provides the | ||
| 346 | * required ordering. | ||
| 347 | * | ||
| 348 | * If possible use READ_ONCE()/WRITE_ONCE() instead. | ||
| 349 | */ | ||
| 350 | #define __ACCESS_ONCE(x) ({ \ | ||
| 351 | __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ | ||
| 352 | (volatile typeof(x) *)&(x); }) | ||
| 353 | #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) | ||
| 354 | |||
| 355 | #endif /* __LINUX_COMPILER_H */ | 330 | #endif /* __LINUX_COMPILER_H */ |
diff --git a/include/linux/completion.h b/include/linux/completion.h index 0662a417febe..94a59ba7d422 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
| @@ -10,9 +10,6 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/wait.h> | 12 | #include <linux/wait.h> |
| 13 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 14 | #include <linux/lockdep.h> | ||
| 15 | #endif | ||
| 16 | 13 | ||
| 17 | /* | 14 | /* |
| 18 | * struct completion - structure used to maintain state for a "completion" | 15 | * struct completion - structure used to maintain state for a "completion" |
| @@ -29,58 +26,16 @@ | |||
| 29 | struct completion { | 26 | struct completion { |
| 30 | unsigned int done; | 27 | unsigned int done; |
| 31 | wait_queue_head_t wait; | 28 | wait_queue_head_t wait; |
| 32 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 33 | struct lockdep_map_cross map; | ||
| 34 | #endif | ||
| 35 | }; | 29 | }; |
| 36 | 30 | ||
| 37 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 38 | static inline void complete_acquire(struct completion *x) | ||
| 39 | { | ||
| 40 | lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_); | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline void complete_release(struct completion *x) | ||
| 44 | { | ||
| 45 | lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_); | ||
| 46 | } | ||
| 47 | |||
| 48 | static inline void complete_release_commit(struct completion *x) | ||
| 49 | { | ||
| 50 | lock_commit_crosslock((struct lockdep_map *)&x->map); | ||
| 51 | } | ||
| 52 | |||
| 53 | #define init_completion_map(x, m) \ | ||
| 54 | do { \ | ||
| 55 | lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ | ||
| 56 | (m)->name, (m)->key, 0); \ | ||
| 57 | __init_completion(x); \ | ||
| 58 | } while (0) | ||
| 59 | |||
| 60 | #define init_completion(x) \ | ||
| 61 | do { \ | ||
| 62 | static struct lock_class_key __key; \ | ||
| 63 | lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ | ||
| 64 | "(completion)" #x, \ | ||
| 65 | &__key, 0); \ | ||
| 66 | __init_completion(x); \ | ||
| 67 | } while (0) | ||
| 68 | #else | ||
| 69 | #define init_completion_map(x, m) __init_completion(x) | 31 | #define init_completion_map(x, m) __init_completion(x) |
| 70 | #define init_completion(x) __init_completion(x) | 32 | #define init_completion(x) __init_completion(x) |
| 71 | static inline void complete_acquire(struct completion *x) {} | 33 | static inline void complete_acquire(struct completion *x) {} |
| 72 | static inline void complete_release(struct completion *x) {} | 34 | static inline void complete_release(struct completion *x) {} |
| 73 | static inline void complete_release_commit(struct completion *x) {} | 35 | static inline void complete_release_commit(struct completion *x) {} |
| 74 | #endif | ||
| 75 | 36 | ||
| 76 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 77 | #define COMPLETION_INITIALIZER(work) \ | ||
| 78 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \ | ||
| 79 | STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) } | ||
| 80 | #else | ||
| 81 | #define COMPLETION_INITIALIZER(work) \ | 37 | #define COMPLETION_INITIALIZER(work) \ |
| 82 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } | 38 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
| 83 | #endif | ||
| 84 | 39 | ||
| 85 | #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ | 40 | #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ |
| 86 | (*({ init_completion_map(&(work), &(map)); &(work); })) | 41 | (*({ init_completion_map(&(work), &(map)); &(work); })) |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 201ab7267986..1a32e558eb11 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
| @@ -86,7 +86,7 @@ enum cpuhp_state { | |||
| 86 | CPUHP_MM_ZSWP_POOL_PREPARE, | 86 | CPUHP_MM_ZSWP_POOL_PREPARE, |
| 87 | CPUHP_KVM_PPC_BOOK3S_PREPARE, | 87 | CPUHP_KVM_PPC_BOOK3S_PREPARE, |
| 88 | CPUHP_ZCOMP_PREPARE, | 88 | CPUHP_ZCOMP_PREPARE, |
| 89 | CPUHP_TIMERS_DEAD, | 89 | CPUHP_TIMERS_PREPARE, |
| 90 | CPUHP_MIPS_SOC_PREPARE, | 90 | CPUHP_MIPS_SOC_PREPARE, |
| 91 | CPUHP_BP_PREPARE_DYN, | 91 | CPUHP_BP_PREPARE_DYN, |
| 92 | CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, | 92 | CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 099058e1178b..631286535d0f 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
| @@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *); | |||
| 83 | extern void set_groups(struct cred *, struct group_info *); | 83 | extern void set_groups(struct cred *, struct group_info *); |
| 84 | extern int groups_search(const struct group_info *, kgid_t); | 84 | extern int groups_search(const struct group_info *, kgid_t); |
| 85 | extern bool may_setgroups(void); | 85 | extern bool may_setgroups(void); |
| 86 | extern void groups_sort(struct group_info *); | ||
| 86 | 87 | ||
| 87 | /* | 88 | /* |
| 88 | * The security context of a task | 89 | * The security context of a task |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 55e672592fa9..7258cd676df4 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
| @@ -66,9 +66,10 @@ struct gpio_irq_chip { | |||
| 66 | /** | 66 | /** |
| 67 | * @lock_key: | 67 | * @lock_key: |
| 68 | * | 68 | * |
| 69 | * Per GPIO IRQ chip lockdep class. | 69 | * Per GPIO IRQ chip lockdep classes. |
| 70 | */ | 70 | */ |
| 71 | struct lock_class_key *lock_key; | 71 | struct lock_class_key *lock_key; |
| 72 | struct lock_class_key *request_key; | ||
| 72 | 73 | ||
| 73 | /** | 74 | /** |
| 74 | * @parent_handler: | 75 | * @parent_handler: |
| @@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip, | |||
| 323 | 324 | ||
| 324 | /* add/remove chips */ | 325 | /* add/remove chips */ |
| 325 | extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, | 326 | extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, |
| 326 | struct lock_class_key *lock_key); | 327 | struct lock_class_key *lock_key, |
| 328 | struct lock_class_key *request_key); | ||
| 327 | 329 | ||
| 328 | /** | 330 | /** |
| 329 | * gpiochip_add_data() - register a gpio_chip | 331 | * gpiochip_add_data() - register a gpio_chip |
| @@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, | |||
| 350 | */ | 352 | */ |
| 351 | #ifdef CONFIG_LOCKDEP | 353 | #ifdef CONFIG_LOCKDEP |
| 352 | #define gpiochip_add_data(chip, data) ({ \ | 354 | #define gpiochip_add_data(chip, data) ({ \ |
| 353 | static struct lock_class_key key; \ | 355 | static struct lock_class_key lock_key; \ |
| 354 | gpiochip_add_data_with_key(chip, data, &key); \ | 356 | static struct lock_class_key request_key; \ |
| 357 | gpiochip_add_data_with_key(chip, data, &lock_key, \ | ||
| 358 | &request_key); \ | ||
| 355 | }) | 359 | }) |
| 356 | #else | 360 | #else |
| 357 | #define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL) | 361 | #define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) |
| 358 | #endif | 362 | #endif |
| 359 | 363 | ||
| 360 | static inline int gpiochip_add(struct gpio_chip *chip) | 364 | static inline int gpiochip_add(struct gpio_chip *chip) |
| @@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, | |||
| 429 | irq_flow_handler_t handler, | 433 | irq_flow_handler_t handler, |
| 430 | unsigned int type, | 434 | unsigned int type, |
| 431 | bool threaded, | 435 | bool threaded, |
| 432 | struct lock_class_key *lock_key); | 436 | struct lock_class_key *lock_key, |
| 437 | struct lock_class_key *request_key); | ||
| 433 | 438 | ||
| 434 | #ifdef CONFIG_LOCKDEP | 439 | #ifdef CONFIG_LOCKDEP |
| 435 | 440 | ||
| @@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, | |||
| 445 | irq_flow_handler_t handler, | 450 | irq_flow_handler_t handler, |
| 446 | unsigned int type) | 451 | unsigned int type) |
| 447 | { | 452 | { |
| 448 | static struct lock_class_key key; | 453 | static struct lock_class_key lock_key; |
| 454 | static struct lock_class_key request_key; | ||
| 449 | 455 | ||
| 450 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, | 456 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, |
| 451 | handler, type, false, &key); | 457 | handler, type, false, |
| 458 | &lock_key, &request_key); | ||
| 452 | } | 459 | } |
| 453 | 460 | ||
| 454 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, | 461 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, |
| @@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, | |||
| 458 | unsigned int type) | 465 | unsigned int type) |
| 459 | { | 466 | { |
| 460 | 467 | ||
| 461 | static struct lock_class_key key; | 468 | static struct lock_class_key lock_key; |
| 469 | static struct lock_class_key request_key; | ||
| 462 | 470 | ||
| 463 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, | 471 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, |
| 464 | handler, type, true, &key); | 472 | handler, type, true, |
| 473 | &lock_key, &request_key); | ||
| 465 | } | 474 | } |
| 466 | #else | 475 | #else |
| 467 | static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, | 476 | static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, |
| @@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, | |||
| 471 | unsigned int type) | 480 | unsigned int type) |
| 472 | { | 481 | { |
| 473 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, | 482 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, |
| 474 | handler, type, false, NULL); | 483 | handler, type, false, NULL, NULL); |
| 475 | } | 484 | } |
| 476 | 485 | ||
| 477 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, | 486 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, |
| @@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, | |||
| 481 | unsigned int type) | 490 | unsigned int type) |
| 482 | { | 491 | { |
| 483 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, | 492 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, |
| 484 | handler, type, true, NULL); | 493 | handler, type, true, NULL, NULL); |
| 485 | } | 494 | } |
| 486 | #endif /* CONFIG_LOCKDEP */ | 495 | #endif /* CONFIG_LOCKDEP */ |
| 487 | 496 | ||
diff --git a/include/linux/idr.h b/include/linux/idr.h index 7c3a365f7e12..fa14f834e4ed 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/radix-tree.h> | 15 | #include <linux/radix-tree.h> |
| 16 | #include <linux/gfp.h> | 16 | #include <linux/gfp.h> |
| 17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
| 18 | #include <linux/bug.h> | ||
| 18 | 19 | ||
| 19 | struct idr { | 20 | struct idr { |
| 20 | struct radix_tree_root idr_rt; | 21 | struct radix_tree_root idr_rt; |
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h new file mode 100644 index 000000000000..2710d72de3c9 --- /dev/null +++ b/include/linux/intel-pti.h | |||
| @@ -0,0 +1,43 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) Intel 2011 | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | * | ||
| 13 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 14 | * | ||
| 15 | * The PTI (Parallel Trace Interface) driver directs trace data routed from | ||
| 16 | * various parts in the system out through the Intel Penwell PTI port and | ||
| 17 | * out of the mobile device for analysis with a debugging tool | ||
| 18 | * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, | ||
| 19 | * compact JTAG, standard. | ||
| 20 | * | ||
| 21 | * This header file will allow other parts of the OS to use the | ||
| 22 | * interface to write out it's contents for debugging a mobile system. | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef LINUX_INTEL_PTI_H_ | ||
| 26 | #define LINUX_INTEL_PTI_H_ | ||
| 27 | |||
| 28 | /* offset for last dword of any PTI message. Part of MIPI P1149.7 */ | ||
| 29 | #define PTI_LASTDWORD_DTS 0x30 | ||
| 30 | |||
| 31 | /* basic structure used as a write address to the PTI HW */ | ||
| 32 | struct pti_masterchannel { | ||
| 33 | u8 master; | ||
| 34 | u8 channel; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* the following functions are defined in misc/pti.c */ | ||
| 38 | void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); | ||
| 39 | struct pti_masterchannel *pti_request_masterchannel(u8 type, | ||
| 40 | const char *thread_name); | ||
| 41 | void pti_release_masterchannel(struct pti_masterchannel *mc); | ||
| 42 | |||
| 43 | #endif /* LINUX_INTEL_PTI_H_ */ | ||
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index cb18c6290ca8..8415bf1a9776 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -273,7 +273,8 @@ struct ipv6_pinfo { | |||
| 273 | * 100: prefer care-of address | 273 | * 100: prefer care-of address |
| 274 | */ | 274 | */ |
| 275 | dontfrag:1, | 275 | dontfrag:1, |
| 276 | autoflowlabel:1; | 276 | autoflowlabel:1, |
| 277 | autoflowlabel_set:1; | ||
| 277 | __u8 min_hopcount; | 278 | __u8 min_hopcount; |
| 278 | __u8 tclass; | 279 | __u8 tclass; |
| 279 | __be32 rcv_flowinfo; | 280 | __be32 rcv_flowinfo; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index e140f69163b6..a0231e96a578 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -212,6 +212,7 @@ struct irq_data { | |||
| 212 | * mask. Applies only to affinity managed irqs. | 212 | * mask. Applies only to affinity managed irqs. |
| 213 | * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target | 213 | * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target |
| 214 | * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set | 214 | * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set |
| 215 | * IRQD_CAN_RESERVE - Can use reservation mode | ||
| 215 | */ | 216 | */ |
| 216 | enum { | 217 | enum { |
| 217 | IRQD_TRIGGER_MASK = 0xf, | 218 | IRQD_TRIGGER_MASK = 0xf, |
| @@ -233,6 +234,7 @@ enum { | |||
| 233 | IRQD_MANAGED_SHUTDOWN = (1 << 23), | 234 | IRQD_MANAGED_SHUTDOWN = (1 << 23), |
| 234 | IRQD_SINGLE_TARGET = (1 << 24), | 235 | IRQD_SINGLE_TARGET = (1 << 24), |
| 235 | IRQD_DEFAULT_TRIGGER_SET = (1 << 25), | 236 | IRQD_DEFAULT_TRIGGER_SET = (1 << 25), |
| 237 | IRQD_CAN_RESERVE = (1 << 26), | ||
| 236 | }; | 238 | }; |
| 237 | 239 | ||
| 238 | #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) | 240 | #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) |
| @@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) | |||
| 377 | return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; | 379 | return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; |
| 378 | } | 380 | } |
| 379 | 381 | ||
| 382 | static inline void irqd_set_can_reserve(struct irq_data *d) | ||
| 383 | { | ||
| 384 | __irqd_to_state(d) |= IRQD_CAN_RESERVE; | ||
| 385 | } | ||
| 386 | |||
| 387 | static inline void irqd_clr_can_reserve(struct irq_data *d) | ||
| 388 | { | ||
| 389 | __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; | ||
| 390 | } | ||
| 391 | |||
| 392 | static inline bool irqd_can_reserve(struct irq_data *d) | ||
| 393 | { | ||
| 394 | return __irqd_to_state(d) & IRQD_CAN_RESERVE; | ||
| 395 | } | ||
| 396 | |||
| 380 | #undef __irqd_to_state | 397 | #undef __irqd_to_state |
| 381 | 398 | ||
| 382 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | 399 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) |
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 39fb3700f7a9..25b33b664537 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
| @@ -255,12 +255,15 @@ static inline bool irq_is_percpu_devid(unsigned int irq) | |||
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | static inline void | 257 | static inline void |
| 258 | irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) | 258 | irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, |
| 259 | struct lock_class_key *request_class) | ||
| 259 | { | 260 | { |
| 260 | struct irq_desc *desc = irq_to_desc(irq); | 261 | struct irq_desc *desc = irq_to_desc(irq); |
| 261 | 262 | ||
| 262 | if (desc) | 263 | if (desc) { |
| 263 | lockdep_set_class(&desc->lock, class); | 264 | lockdep_set_class(&desc->lock, lock_class); |
| 265 | lockdep_set_class(&desc->request_mutex, request_class); | ||
| 266 | } | ||
| 264 | } | 267 | } |
| 265 | 268 | ||
| 266 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | 269 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index a34355d19546..48c7e86bb556 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
| @@ -113,7 +113,7 @@ struct irq_domain_ops { | |||
| 113 | unsigned int nr_irqs, void *arg); | 113 | unsigned int nr_irqs, void *arg); |
| 114 | void (*free)(struct irq_domain *d, unsigned int virq, | 114 | void (*free)(struct irq_domain *d, unsigned int virq, |
| 115 | unsigned int nr_irqs); | 115 | unsigned int nr_irqs); |
| 116 | int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early); | 116 | int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); |
| 117 | void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); | 117 | void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); |
| 118 | int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, | 118 | int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, |
| 119 | unsigned long *out_hwirq, unsigned int *out_type); | 119 | unsigned long *out_hwirq, unsigned int *out_type); |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index a842551fe044..2e75dc34bff5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -158,12 +158,6 @@ struct lockdep_map { | |||
| 158 | int cpu; | 158 | int cpu; |
| 159 | unsigned long ip; | 159 | unsigned long ip; |
| 160 | #endif | 160 | #endif |
| 161 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 162 | /* | ||
| 163 | * Whether it's a crosslock. | ||
| 164 | */ | ||
| 165 | int cross; | ||
| 166 | #endif | ||
| 167 | }; | 161 | }; |
| 168 | 162 | ||
| 169 | static inline void lockdep_copy_map(struct lockdep_map *to, | 163 | static inline void lockdep_copy_map(struct lockdep_map *to, |
| @@ -267,96 +261,9 @@ struct held_lock { | |||
| 267 | unsigned int hardirqs_off:1; | 261 | unsigned int hardirqs_off:1; |
| 268 | unsigned int references:12; /* 32 bits */ | 262 | unsigned int references:12; /* 32 bits */ |
| 269 | unsigned int pin_count; | 263 | unsigned int pin_count; |
| 270 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 271 | /* | ||
| 272 | * Generation id. | ||
| 273 | * | ||
| 274 | * A value of cross_gen_id will be stored when holding this, | ||
| 275 | * which is globally increased whenever each crosslock is held. | ||
| 276 | */ | ||
| 277 | unsigned int gen_id; | ||
| 278 | #endif | ||
| 279 | }; | ||
| 280 | |||
| 281 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 282 | #define MAX_XHLOCK_TRACE_ENTRIES 5 | ||
| 283 | |||
| 284 | /* | ||
| 285 | * This is for keeping locks waiting for commit so that true dependencies | ||
| 286 | * can be added at commit step. | ||
| 287 | */ | ||
| 288 | struct hist_lock { | ||
| 289 | /* | ||
| 290 | * Id for each entry in the ring buffer. This is used to | ||
| 291 | * decide whether the ring buffer was overwritten or not. | ||
| 292 | * | ||
| 293 | * For example, | ||
| 294 | * | ||
| 295 | * |<----------- hist_lock ring buffer size ------->| | ||
| 296 | * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii | ||
| 297 | * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii....................... | ||
| 298 | * | ||
| 299 | * where 'p' represents an acquisition in process | ||
| 300 | * context, 'i' represents an acquisition in irq | ||
| 301 | * context. | ||
| 302 | * | ||
| 303 | * In this example, the ring buffer was overwritten by | ||
| 304 | * acquisitions in irq context, that should be detected on | ||
| 305 | * rollback or commit. | ||
| 306 | */ | ||
| 307 | unsigned int hist_id; | ||
| 308 | |||
| 309 | /* | ||
| 310 | * Seperate stack_trace data. This will be used at commit step. | ||
| 311 | */ | ||
| 312 | struct stack_trace trace; | ||
| 313 | unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES]; | ||
| 314 | |||
| 315 | /* | ||
| 316 | * Seperate hlock instance. This will be used at commit step. | ||
| 317 | * | ||
| 318 | * TODO: Use a smaller data structure containing only necessary | ||
| 319 | * data. However, we should make lockdep code able to handle the | ||
| 320 | * smaller one first. | ||
| 321 | */ | ||
| 322 | struct held_lock hlock; | ||
| 323 | }; | 264 | }; |
| 324 | 265 | ||
| 325 | /* | 266 | /* |
| 326 | * To initialize a lock as crosslock, lockdep_init_map_crosslock() should | ||
| 327 | * be called instead of lockdep_init_map(). | ||
| 328 | */ | ||
| 329 | struct cross_lock { | ||
| 330 | /* | ||
| 331 | * When more than one acquisition of crosslocks are overlapped, | ||
| 332 | * we have to perform commit for them based on cross_gen_id of | ||
| 333 | * the first acquisition, which allows us to add more true | ||
| 334 | * dependencies. | ||
| 335 | * | ||
| 336 | * Moreover, when no acquisition of a crosslock is in progress, | ||
| 337 | * we should not perform commit because the lock might not exist | ||
| 338 | * any more, which might cause incorrect memory access. So we | ||
| 339 | * have to track the number of acquisitions of a crosslock. | ||
| 340 | */ | ||
| 341 | int nr_acquire; | ||
| 342 | |||
| 343 | /* | ||
| 344 | * Seperate hlock instance. This will be used at commit step. | ||
| 345 | * | ||
| 346 | * TODO: Use a smaller data structure containing only necessary | ||
| 347 | * data. However, we should make lockdep code able to handle the | ||
| 348 | * smaller one first. | ||
| 349 | */ | ||
| 350 | struct held_lock hlock; | ||
| 351 | }; | ||
| 352 | |||
| 353 | struct lockdep_map_cross { | ||
| 354 | struct lockdep_map map; | ||
| 355 | struct cross_lock xlock; | ||
| 356 | }; | ||
| 357 | #endif | ||
| 358 | |||
| 359 | /* | ||
| 360 | * Initialization, self-test and debugging-output methods: | 267 | * Initialization, self-test and debugging-output methods: |
| 361 | */ | 268 | */ |
| 362 | extern void lockdep_info(void); | 269 | extern void lockdep_info(void); |
| @@ -560,37 +467,6 @@ enum xhlock_context_t { | |||
| 560 | XHLOCK_CTX_NR, | 467 | XHLOCK_CTX_NR, |
| 561 | }; | 468 | }; |
| 562 | 469 | ||
| 563 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 564 | extern void lockdep_init_map_crosslock(struct lockdep_map *lock, | ||
| 565 | const char *name, | ||
| 566 | struct lock_class_key *key, | ||
| 567 | int subclass); | ||
| 568 | extern void lock_commit_crosslock(struct lockdep_map *lock); | ||
| 569 | |||
| 570 | /* | ||
| 571 | * What we essencially have to initialize is 'nr_acquire'. Other members | ||
| 572 | * will be initialized in add_xlock(). | ||
| 573 | */ | ||
| 574 | #define STATIC_CROSS_LOCK_INIT() \ | ||
| 575 | { .nr_acquire = 0,} | ||
| 576 | |||
| 577 | #define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \ | ||
| 578 | { .map.name = (_name), .map.key = (void *)(_key), \ | ||
| 579 | .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), } | ||
| 580 | |||
| 581 | /* | ||
| 582 | * To initialize a lockdep_map statically use this macro. | ||
| 583 | * Note that _name must not be NULL. | ||
| 584 | */ | ||
| 585 | #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ | ||
| 586 | { .name = (_name), .key = (void *)(_key), .cross = 0, } | ||
| 587 | |||
| 588 | extern void crossrelease_hist_start(enum xhlock_context_t c); | ||
| 589 | extern void crossrelease_hist_end(enum xhlock_context_t c); | ||
| 590 | extern void lockdep_invariant_state(bool force); | ||
| 591 | extern void lockdep_init_task(struct task_struct *task); | ||
| 592 | extern void lockdep_free_task(struct task_struct *task); | ||
| 593 | #else /* !CROSSRELEASE */ | ||
| 594 | #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) | 470 | #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) |
| 595 | /* | 471 | /* |
| 596 | * To initialize a lockdep_map statically use this macro. | 472 | * To initialize a lockdep_map statically use this macro. |
| @@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {} | |||
| 604 | static inline void lockdep_invariant_state(bool force) {} | 480 | static inline void lockdep_invariant_state(bool force) {} |
| 605 | static inline void lockdep_init_task(struct task_struct *task) {} | 481 | static inline void lockdep_init_task(struct task_struct *task) {} |
| 606 | static inline void lockdep_free_task(struct task_struct *task) {} | 482 | static inline void lockdep_free_task(struct task_struct *task) {} |
| 607 | #endif /* CROSSRELEASE */ | ||
| 608 | 483 | ||
| 609 | #ifdef CONFIG_LOCK_STAT | 484 | #ifdef CONFIG_LOCK_STAT |
| 610 | 485 | ||
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index a2a1318a3d0c..c3d3f04d8cc6 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h | |||
| @@ -915,10 +915,10 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; | |||
| 915 | #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6) | 915 | #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6) |
| 916 | 916 | ||
| 917 | enum dev_aspm_mode { | 917 | enum dev_aspm_mode { |
| 918 | DEV_ASPM_DISABLE = 0, | ||
| 919 | DEV_ASPM_DYNAMIC, | 918 | DEV_ASPM_DYNAMIC, |
| 920 | DEV_ASPM_BACKDOOR, | 919 | DEV_ASPM_BACKDOOR, |
| 921 | DEV_ASPM_STATIC, | 920 | DEV_ASPM_STATIC, |
| 921 | DEV_ASPM_DISABLE, | ||
| 922 | }; | 922 | }; |
| 923 | 923 | ||
| 924 | /* | 924 | /* |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a886b51511ab..1f509d072026 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -556,6 +556,7 @@ struct mlx5_core_sriov { | |||
| 556 | }; | 556 | }; |
| 557 | 557 | ||
| 558 | struct mlx5_irq_info { | 558 | struct mlx5_irq_info { |
| 559 | cpumask_var_t mask; | ||
| 559 | char name[MLX5_MAX_IRQ_NAME]; | 560 | char name[MLX5_MAX_IRQ_NAME]; |
| 560 | }; | 561 | }; |
| 561 | 562 | ||
| @@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, | |||
| 1048 | enum mlx5_eq_type type); | 1049 | enum mlx5_eq_type type); |
| 1049 | int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); | 1050 | int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); |
| 1050 | int mlx5_start_eqs(struct mlx5_core_dev *dev); | 1051 | int mlx5_start_eqs(struct mlx5_core_dev *dev); |
| 1051 | int mlx5_stop_eqs(struct mlx5_core_dev *dev); | 1052 | void mlx5_stop_eqs(struct mlx5_core_dev *dev); |
| 1052 | int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, | 1053 | int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, |
| 1053 | unsigned int *irqn); | 1054 | unsigned int *irqn); |
| 1054 | int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); | 1055 | int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); |
| @@ -1164,6 +1165,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); | |||
| 1164 | int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); | 1165 | int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); |
| 1165 | bool mlx5_lag_is_active(struct mlx5_core_dev *dev); | 1166 | bool mlx5_lag_is_active(struct mlx5_core_dev *dev); |
| 1166 | struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); | 1167 | struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); |
| 1168 | int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, | ||
| 1169 | u64 *values, | ||
| 1170 | int num_counters, | ||
| 1171 | size_t *offsets); | ||
| 1167 | struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); | 1172 | struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); |
| 1168 | void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); | 1173 | void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); |
| 1169 | 1174 | ||
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 38a7577a9ce7..d44ec5f41d4a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -147,7 +147,7 @@ enum { | |||
| 147 | MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, | 147 | MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, |
| 148 | MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, | 148 | MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, |
| 149 | MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, | 149 | MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, |
| 150 | MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, | 150 | MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780, |
| 151 | MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, | 151 | MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, |
| 152 | MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, | 152 | MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, |
| 153 | MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, | 153 | MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, |
| @@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits { | |||
| 7239 | u8 vxlan_udp_port[0x10]; | 7239 | u8 vxlan_udp_port[0x10]; |
| 7240 | }; | 7240 | }; |
| 7241 | 7241 | ||
| 7242 | struct mlx5_ifc_set_rate_limit_out_bits { | 7242 | struct mlx5_ifc_set_pp_rate_limit_out_bits { |
| 7243 | u8 status[0x8]; | 7243 | u8 status[0x8]; |
| 7244 | u8 reserved_at_8[0x18]; | 7244 | u8 reserved_at_8[0x18]; |
| 7245 | 7245 | ||
| @@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits { | |||
| 7248 | u8 reserved_at_40[0x40]; | 7248 | u8 reserved_at_40[0x40]; |
| 7249 | }; | 7249 | }; |
| 7250 | 7250 | ||
| 7251 | struct mlx5_ifc_set_rate_limit_in_bits { | 7251 | struct mlx5_ifc_set_pp_rate_limit_in_bits { |
| 7252 | u8 opcode[0x10]; | 7252 | u8 opcode[0x10]; |
| 7253 | u8 reserved_at_10[0x10]; | 7253 | u8 reserved_at_10[0x10]; |
| 7254 | 7254 | ||
| @@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits { | |||
| 7261 | u8 reserved_at_60[0x20]; | 7261 | u8 reserved_at_60[0x20]; |
| 7262 | 7262 | ||
| 7263 | u8 rate_limit[0x20]; | 7263 | u8 rate_limit[0x20]; |
| 7264 | |||
| 7265 | u8 reserved_at_a0[0x160]; | ||
| 7264 | }; | 7266 | }; |
| 7265 | 7267 | ||
| 7266 | struct mlx5_ifc_access_register_out_bits { | 7268 | struct mlx5_ifc_access_register_out_bits { |
diff --git a/include/linux/oom.h b/include/linux/oom.h index 01c91d874a57..5bad038ac012 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
| @@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk) | |||
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | /* | 69 | /* |
| 70 | * Use this helper if tsk->mm != mm and the victim mm needs a special | ||
| 71 | * handling. This is guaranteed to stay true after once set. | ||
| 72 | */ | ||
| 73 | static inline bool mm_is_oom_victim(struct mm_struct *mm) | ||
| 74 | { | ||
| 75 | return test_bit(MMF_OOM_VICTIM, &mm->flags); | ||
| 76 | } | ||
| 77 | |||
| 78 | /* | ||
| 70 | * Checks whether a page fault on the given mm is still reliable. | 79 | * Checks whether a page fault on the given mm is still reliable. |
| 71 | * This is no longer true if the oom reaper started to reap the | 80 | * This is no longer true if the oom reaper started to reap the |
| 72 | * address space which is reflected by MMF_UNSTABLE flag set in | 81 | * address space which is reflected by MMF_UNSTABLE flag set in |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 0403894147a3..c170c9250c8b 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, | |||
| 1674 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, | 1674 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, |
| 1675 | unsigned int devfn) | 1675 | unsigned int devfn) |
| 1676 | { return NULL; } | 1676 | { return NULL; } |
| 1677 | static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, | ||
| 1678 | unsigned int bus, unsigned int devfn) | ||
| 1679 | { return NULL; } | ||
| 1677 | 1680 | ||
| 1678 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } | 1681 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } |
| 1679 | static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } | 1682 | static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 65d39115f06d..492ed473ba7e 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev); | |||
| 765 | extern int pm_generic_poweroff(struct device *dev); | 765 | extern int pm_generic_poweroff(struct device *dev); |
| 766 | extern void pm_generic_complete(struct device *dev); | 766 | extern void pm_generic_complete(struct device *dev); |
| 767 | 767 | ||
| 768 | extern void dev_pm_skip_next_resume_phases(struct device *dev); | ||
| 768 | extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); | 769 | extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); |
| 769 | 770 | ||
| 770 | #else /* !CONFIG_PM_SLEEP */ | 771 | #else /* !CONFIG_PM_SLEEP */ |
diff --git a/include/linux/pti.h b/include/linux/pti.h index b3ea01a3197e..0174883a935a 100644 --- a/include/linux/pti.h +++ b/include/linux/pti.h | |||
| @@ -1,43 +1,11 @@ | |||
| 1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | * Copyright (C) Intel 2011 | 2 | #ifndef _INCLUDE_PTI_H |
| 3 | * | 3 | #define _INCLUDE_PTI_H |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | * | ||
| 13 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 14 | * | ||
| 15 | * The PTI (Parallel Trace Interface) driver directs trace data routed from | ||
| 16 | * various parts in the system out through the Intel Penwell PTI port and | ||
| 17 | * out of the mobile device for analysis with a debugging tool | ||
| 18 | * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, | ||
| 19 | * compact JTAG, standard. | ||
| 20 | * | ||
| 21 | * This header file will allow other parts of the OS to use the | ||
| 22 | * interface to write out it's contents for debugging a mobile system. | ||
| 23 | */ | ||
| 24 | 4 | ||
| 25 | #ifndef PTI_H_ | 5 | #ifdef CONFIG_PAGE_TABLE_ISOLATION |
| 26 | #define PTI_H_ | 6 | #include <asm/pti.h> |
| 7 | #else | ||
| 8 | static inline void pti_init(void) { } | ||
| 9 | #endif | ||
| 27 | 10 | ||
| 28 | /* offset for last dword of any PTI message. Part of MIPI P1149.7 */ | 11 | #endif |
| 29 | #define PTI_LASTDWORD_DTS 0x30 | ||
| 30 | |||
| 31 | /* basic structure used as a write address to the PTI HW */ | ||
| 32 | struct pti_masterchannel { | ||
| 33 | u8 master; | ||
| 34 | u8 channel; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* the following functions are defined in misc/pti.c */ | ||
| 38 | void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); | ||
| 39 | struct pti_masterchannel *pti_request_masterchannel(u8 type, | ||
| 40 | const char *thread_name); | ||
| 41 | void pti_release_masterchannel(struct pti_masterchannel *mc); | ||
| 42 | |||
| 43 | #endif /*PTI_H_*/ | ||
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 37b4bb2545b3..6866df4f31b5 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h | |||
| @@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) | |||
| 101 | 101 | ||
| 102 | /* Note: callers invoking this in a loop must use a compiler barrier, | 102 | /* Note: callers invoking this in a loop must use a compiler barrier, |
| 103 | * for example cpu_relax(). Callers must hold producer_lock. | 103 | * for example cpu_relax(). Callers must hold producer_lock. |
| 104 | * Callers are responsible for making sure pointer that is being queued | ||
| 105 | * points to a valid data. | ||
| 104 | */ | 106 | */ |
| 105 | static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) | 107 | static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) |
| 106 | { | 108 | { |
| 107 | if (unlikely(!r->size) || r->queue[r->producer]) | 109 | if (unlikely(!r->size) || r->queue[r->producer]) |
| 108 | return -ENOSPC; | 110 | return -ENOSPC; |
| 109 | 111 | ||
| 112 | /* Make sure the pointer we are storing points to a valid data. */ | ||
| 113 | /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ | ||
| 114 | smp_wmb(); | ||
| 115 | |||
| 110 | r->queue[r->producer++] = ptr; | 116 | r->queue[r->producer++] = ptr; |
| 111 | if (unlikely(r->producer >= r->size)) | 117 | if (unlikely(r->producer >= r->size)) |
| 112 | r->producer = 0; | 118 | r->producer = 0; |
| @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) | |||
| 275 | if (ptr) | 281 | if (ptr) |
| 276 | __ptr_ring_discard_one(r); | 282 | __ptr_ring_discard_one(r); |
| 277 | 283 | ||
| 284 | /* Make sure anyone accessing data through the pointer is up to date. */ | ||
| 285 | /* Pairs with smp_wmb in __ptr_ring_produce. */ | ||
| 286 | smp_read_barrier_depends(); | ||
| 278 | return ptr; | 287 | return ptr; |
| 279 | } | 288 | } |
| 280 | 289 | ||
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d574361943ea..fcbeed4053ef 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h | |||
| @@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, | |||
| 99 | struct rb_root *root); | 99 | struct rb_root *root); |
| 100 | extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, | 100 | extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, |
| 101 | struct rb_root *root); | 101 | struct rb_root *root); |
| 102 | extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, | ||
| 103 | struct rb_root_cached *root); | ||
| 102 | 104 | ||
| 103 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, | 105 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, |
| 104 | struct rb_node **rb_link) | 106 | struct rb_node **rb_link) |
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index cc0072e93e36..857a72ceb794 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h | |||
| @@ -10,9 +10,6 @@ | |||
| 10 | */ | 10 | */ |
| 11 | typedef struct { | 11 | typedef struct { |
| 12 | arch_rwlock_t raw_lock; | 12 | arch_rwlock_t raw_lock; |
| 13 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
| 14 | unsigned int break_lock; | ||
| 15 | #endif | ||
| 16 | #ifdef CONFIG_DEBUG_SPINLOCK | 13 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 17 | unsigned int magic, owner_cpu; | 14 | unsigned int magic, owner_cpu; |
| 18 | void *owner; | 15 | void *owner; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 21991d668d35..d2588263a989 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -849,17 +849,6 @@ struct task_struct { | |||
| 849 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | 849 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
| 850 | #endif | 850 | #endif |
| 851 | 851 | ||
| 852 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 853 | #define MAX_XHLOCKS_NR 64UL | ||
| 854 | struct hist_lock *xhlocks; /* Crossrelease history locks */ | ||
| 855 | unsigned int xhlock_idx; | ||
| 856 | /* For restoring at history boundaries */ | ||
| 857 | unsigned int xhlock_idx_hist[XHLOCK_CTX_NR]; | ||
| 858 | unsigned int hist_id; | ||
| 859 | /* For overwrite check at each context exit */ | ||
| 860 | unsigned int hist_id_save[XHLOCK_CTX_NR]; | ||
| 861 | #endif | ||
| 862 | |||
| 863 | #ifdef CONFIG_UBSAN | 852 | #ifdef CONFIG_UBSAN |
| 864 | unsigned int in_ubsan; | 853 | unsigned int in_ubsan; |
| 865 | #endif | 854 | #endif |
| @@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from) | |||
| 1503 | __set_task_comm(tsk, from, false); | 1492 | __set_task_comm(tsk, from, false); |
| 1504 | } | 1493 | } |
| 1505 | 1494 | ||
| 1506 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 1495 | extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); |
| 1496 | #define get_task_comm(buf, tsk) ({ \ | ||
| 1497 | BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ | ||
| 1498 | __get_task_comm(buf, sizeof(buf), tsk); \ | ||
| 1499 | }) | ||
| 1507 | 1500 | ||
| 1508 | #ifdef CONFIG_SMP | 1501 | #ifdef CONFIG_SMP |
| 1509 | void scheduler_ipi(void); | 1502 | void scheduler_ipi(void); |
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 9c8847395b5e..ec912d01126f 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h | |||
| @@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm) | |||
| 70 | #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ | 70 | #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ |
| 71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ | 71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ |
| 72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ | 72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ |
| 73 | #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ | ||
| 73 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) | 74 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) |
| 74 | 75 | ||
| 75 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ | 76 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 7b2170bfd6e7..bc6bb325d1bf 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
| @@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, | |||
| 126 | * for that name. This appears in the sysfs "modalias" attribute | 126 | * for that name. This appears in the sysfs "modalias" attribute |
| 127 | * for driver coldplugging, and in uevents used for hotplugging | 127 | * for driver coldplugging, and in uevents used for hotplugging |
| 128 | * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when | 128 | * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when |
| 129 | * when not using a GPIO line) | 129 | * not using a GPIO line) |
| 130 | * | 130 | * |
| 131 | * @statistics: statistics for the spi_device | 131 | * @statistics: statistics for the spi_device |
| 132 | * | 132 | * |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a39186194cd6..3bf273538840 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -107,16 +107,11 @@ do { \ | |||
| 107 | 107 | ||
| 108 | #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) | 108 | #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) |
| 109 | 109 | ||
| 110 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
| 111 | #define raw_spin_is_contended(lock) ((lock)->break_lock) | ||
| 112 | #else | ||
| 113 | |||
| 114 | #ifdef arch_spin_is_contended | 110 | #ifdef arch_spin_is_contended |
| 115 | #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) | 111 | #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) |
| 116 | #else | 112 | #else |
| 117 | #define raw_spin_is_contended(lock) (((void)(lock), 0)) | 113 | #define raw_spin_is_contended(lock) (((void)(lock), 0)) |
| 118 | #endif /*arch_spin_is_contended*/ | 114 | #endif /*arch_spin_is_contended*/ |
| 119 | #endif | ||
| 120 | 115 | ||
| 121 | /* | 116 | /* |
| 122 | * This barrier must provide two things: | 117 | * This barrier must provide two things: |
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 73548eb13a5d..24b4e6f2c1a2 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h | |||
| @@ -19,9 +19,6 @@ | |||
| 19 | 19 | ||
| 20 | typedef struct raw_spinlock { | 20 | typedef struct raw_spinlock { |
| 21 | arch_spinlock_t raw_lock; | 21 | arch_spinlock_t raw_lock; |
| 22 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
| 23 | unsigned int break_lock; | ||
| 24 | #endif | ||
| 25 | #ifdef CONFIG_DEBUG_SPINLOCK | 22 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 26 | unsigned int magic, owner_cpu; | 23 | unsigned int magic, owner_cpu; |
| 27 | void *owner; | 24 | void *owner; |
diff --git a/include/linux/string.h b/include/linux/string.h index 410ecf17de3c..cfd83eb2f926 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) | |||
| 259 | { | 259 | { |
| 260 | __kernel_size_t ret; | 260 | __kernel_size_t ret; |
| 261 | size_t p_size = __builtin_object_size(p, 0); | 261 | size_t p_size = __builtin_object_size(p, 0); |
| 262 | if (p_size == (size_t)-1) | 262 | |
| 263 | /* Work around gcc excess stack consumption issue */ | ||
| 264 | if (p_size == (size_t)-1 || | ||
| 265 | (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) | ||
| 263 | return __builtin_strlen(p); | 266 | return __builtin_strlen(p); |
| 264 | ret = strnlen(p, p_size); | 267 | ret = strnlen(p, p_size); |
| 265 | if (p_size <= ret) | 268 | if (p_size <= ret) |
diff --git a/include/linux/tick.h b/include/linux/tick.h index f442d1a42025..7cc35921218e 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void); | |||
| 119 | extern void tick_nohz_irq_exit(void); | 119 | extern void tick_nohz_irq_exit(void); |
| 120 | extern ktime_t tick_nohz_get_sleep_length(void); | 120 | extern ktime_t tick_nohz_get_sleep_length(void); |
| 121 | extern unsigned long tick_nohz_get_idle_calls(void); | 121 | extern unsigned long tick_nohz_get_idle_calls(void); |
| 122 | extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); | ||
| 122 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); | 123 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); |
| 123 | extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); | 124 | extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); |
| 124 | #else /* !CONFIG_NO_HZ_COMMON */ | 125 | #else /* !CONFIG_NO_HZ_COMMON */ |
diff --git a/include/linux/timer.h b/include/linux/timer.h index 04af640ea95b..2448f9cc48a3 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
| @@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j); | |||
| 207 | unsigned long round_jiffies_up_relative(unsigned long j); | 207 | unsigned long round_jiffies_up_relative(unsigned long j); |
| 208 | 208 | ||
| 209 | #ifdef CONFIG_HOTPLUG_CPU | 209 | #ifdef CONFIG_HOTPLUG_CPU |
| 210 | int timers_prepare_cpu(unsigned int cpu); | ||
| 210 | int timers_dead_cpu(unsigned int cpu); | 211 | int timers_dead_cpu(unsigned int cpu); |
| 211 | #else | 212 | #else |
| 212 | #define timers_dead_cpu NULL | 213 | #define timers_prepare_cpu NULL |
| 214 | #define timers_dead_cpu NULL | ||
| 213 | #endif | 215 | #endif |
| 214 | 216 | ||
| 215 | #endif | 217 | #endif |
diff --git a/include/linux/trace.h b/include/linux/trace.h index d24991c1fef3..b95ffb2188ab 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | */ | 18 | */ |
| 19 | struct trace_export { | 19 | struct trace_export { |
| 20 | struct trace_export __rcu *next; | 20 | struct trace_export __rcu *next; |
| 21 | void (*write)(const void *, unsigned int); | 21 | void (*write)(struct trace_export *, const void *, unsigned int); |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | int register_ftrace_export(struct trace_export *export); | 24 | int register_ftrace_export(struct trace_export *export); |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 8b8118a7fadb..cb4d92b79cd9 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
| @@ -3226,7 +3226,6 @@ struct cfg80211_ops { | |||
| 3226 | * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. | 3226 | * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. |
| 3227 | * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing | 3227 | * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing |
| 3228 | * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH. | 3228 | * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH. |
| 3229 | * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans. | ||
| 3230 | * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the | 3229 | * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the |
| 3231 | * firmware. | 3230 | * firmware. |
| 3232 | * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP. | 3231 | * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP. |
diff --git a/include/net/gue.h b/include/net/gue.h index 2fdb29ca74c2..fdad41469b65 100644 --- a/include/net/gue.h +++ b/include/net/gue.h | |||
| @@ -44,10 +44,10 @@ struct guehdr { | |||
| 44 | #else | 44 | #else |
| 45 | #error "Please fix <asm/byteorder.h>" | 45 | #error "Please fix <asm/byteorder.h>" |
| 46 | #endif | 46 | #endif |
| 47 | __u8 proto_ctype; | 47 | __u8 proto_ctype; |
| 48 | __u16 flags; | 48 | __be16 flags; |
| 49 | }; | 49 | }; |
| 50 | __u32 word; | 50 | __be32 word; |
| 51 | }; | 51 | }; |
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| @@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags) | |||
| 84 | * if there is an unknown standard or private flags, or the options length for | 84 | * if there is an unknown standard or private flags, or the options length for |
| 85 | * the flags exceeds the options length specific in hlen of the GUE header. | 85 | * the flags exceeds the options length specific in hlen of the GUE header. |
| 86 | */ | 86 | */ |
| 87 | static inline int validate_gue_flags(struct guehdr *guehdr, | 87 | static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen) |
| 88 | size_t optlen) | ||
| 89 | { | 88 | { |
| 89 | __be16 flags = guehdr->flags; | ||
| 90 | size_t len; | 90 | size_t len; |
| 91 | __be32 flags = guehdr->flags; | ||
| 92 | 91 | ||
| 93 | if (flags & ~GUE_FLAGS_ALL) | 92 | if (flags & ~GUE_FLAGS_ALL) |
| 94 | return 1; | 93 | return 1; |
| @@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr, | |||
| 101 | /* Private flags are last four bytes accounted in | 100 | /* Private flags are last four bytes accounted in |
| 102 | * guehdr_flags_len | 101 | * guehdr_flags_len |
| 103 | */ | 102 | */ |
| 104 | flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); | 103 | __be32 pflags = *(__be32 *)((void *)&guehdr[1] + |
| 104 | len - GUE_LEN_PRIV); | ||
| 105 | 105 | ||
| 106 | if (flags & ~GUE_PFLAGS_ALL) | 106 | if (pflags & ~GUE_PFLAGS_ALL) |
| 107 | return 1; | 107 | return 1; |
| 108 | 108 | ||
| 109 | len += guehdr_priv_flags_len(flags); | 109 | len += guehdr_priv_flags_len(pflags); |
| 110 | if (len > optlen) | 110 | if (len > optlen) |
| 111 | return 1; | 111 | return 1; |
| 112 | } | 112 | } |
diff --git a/include/net/ip.h b/include/net/ip.h index 9896f46cbbf1..af8addbaa3c1 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <net/flow_dissector.h> | 34 | #include <net/flow_dissector.h> |
| 35 | 35 | ||
| 36 | #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ | 36 | #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ |
| 37 | #define IPV4_MIN_MTU 68 /* RFC 791 */ | ||
| 37 | 38 | ||
| 38 | struct sock; | 39 | struct sock; |
| 39 | 40 | ||
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0105445cab83..8e08b6da72f3 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
| @@ -694,9 +694,7 @@ struct tc_cls_matchall_offload { | |||
| 694 | }; | 694 | }; |
| 695 | 695 | ||
| 696 | enum tc_clsbpf_command { | 696 | enum tc_clsbpf_command { |
| 697 | TC_CLSBPF_ADD, | 697 | TC_CLSBPF_OFFLOAD, |
| 698 | TC_CLSBPF_REPLACE, | ||
| 699 | TC_CLSBPF_DESTROY, | ||
| 700 | TC_CLSBPF_STATS, | 698 | TC_CLSBPF_STATS, |
| 701 | }; | 699 | }; |
| 702 | 700 | ||
| @@ -705,6 +703,7 @@ struct tc_cls_bpf_offload { | |||
| 705 | enum tc_clsbpf_command command; | 703 | enum tc_clsbpf_command command; |
| 706 | struct tcf_exts *exts; | 704 | struct tcf_exts *exts; |
| 707 | struct bpf_prog *prog; | 705 | struct bpf_prog *prog; |
| 706 | struct bpf_prog *oldprog; | ||
| 708 | const char *name; | 707 | const char *name; |
| 709 | bool exts_integrated; | 708 | bool exts_integrated; |
| 710 | u32 gen_flags; | 709 | u32 gen_flags; |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 65d0d25f2648..83a3e47d5845 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -71,6 +71,7 @@ struct Qdisc { | |||
| 71 | * qdisc_tree_decrease_qlen() should stop. | 71 | * qdisc_tree_decrease_qlen() should stop. |
| 72 | */ | 72 | */ |
| 73 | #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ | 73 | #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ |
| 74 | #define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */ | ||
| 74 | u32 limit; | 75 | u32 limit; |
| 75 | const struct Qdisc_ops *ops; | 76 | const struct Qdisc_ops *ops; |
| 76 | struct qdisc_size_table __rcu *stab; | 77 | struct qdisc_size_table __rcu *stab; |
diff --git a/include/net/sock.h b/include/net/sock.h index 9155da422692..7a7b14e9628a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -1514,6 +1514,11 @@ static inline bool sock_owned_by_user(const struct sock *sk) | |||
| 1514 | return sk->sk_lock.owned; | 1514 | return sk->sk_lock.owned; |
| 1515 | } | 1515 | } |
| 1516 | 1516 | ||
| 1517 | static inline bool sock_owned_by_user_nocheck(const struct sock *sk) | ||
| 1518 | { | ||
| 1519 | return sk->sk_lock.owned; | ||
| 1520 | } | ||
| 1521 | |||
| 1517 | /* no reclassification while locks are held */ | 1522 | /* no reclassification while locks are held */ |
| 1518 | static inline bool sock_allow_reclassification(const struct sock *csk) | 1523 | static inline bool sock_allow_reclassification(const struct sock *csk) |
| 1519 | { | 1524 | { |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index dc28a98ce97c..ae35991b5877 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
| @@ -1570,6 +1570,9 @@ int xfrm_init_state(struct xfrm_state *x); | |||
| 1570 | int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); | 1570 | int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); |
| 1571 | int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); | 1571 | int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); |
| 1572 | int xfrm_input_resume(struct sk_buff *skb, int nexthdr); | 1572 | int xfrm_input_resume(struct sk_buff *skb, int nexthdr); |
| 1573 | int xfrm_trans_queue(struct sk_buff *skb, | ||
| 1574 | int (*finish)(struct net *, struct sock *, | ||
| 1575 | struct sk_buff *)); | ||
| 1573 | int xfrm_output_resume(struct sk_buff *skb, int err); | 1576 | int xfrm_output_resume(struct sk_buff *skb, int err); |
| 1574 | int xfrm_output(struct sock *sk, struct sk_buff *skb); | 1577 | int xfrm_output(struct sock *sk, struct sk_buff *skb); |
| 1575 | int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1578 | int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h index 758607226bfd..2cd449328aee 100644 --- a/include/trace/events/clk.h +++ b/include/trace/events/clk.h | |||
| @@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent, | |||
| 134 | 134 | ||
| 135 | TP_STRUCT__entry( | 135 | TP_STRUCT__entry( |
| 136 | __string( name, core->name ) | 136 | __string( name, core->name ) |
| 137 | __string( pname, parent->name ) | 137 | __string( pname, parent ? parent->name : "none" ) |
| 138 | ), | 138 | ), |
| 139 | 139 | ||
| 140 | TP_fast_assign( | 140 | TP_fast_assign( |
| 141 | __assign_str(name, core->name); | 141 | __assign_str(name, core->name); |
| 142 | __assign_str(pname, parent->name); | 142 | __assign_str(pname, parent ? parent->name : "none"); |
| 143 | ), | 143 | ), |
| 144 | 144 | ||
| 145 | TP_printk("%s %s", __get_str(name), __get_str(pname)) | 145 | TP_printk("%s %s", __get_str(name), __get_str(pname)) |
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index e4b0b8e09932..2c735a3e6613 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h | |||
| @@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq, | |||
| 211 | { KVM_TRACE_MMIO_WRITE, "write" } | 211 | { KVM_TRACE_MMIO_WRITE, "write" } |
| 212 | 212 | ||
| 213 | TRACE_EVENT(kvm_mmio, | 213 | TRACE_EVENT(kvm_mmio, |
| 214 | TP_PROTO(int type, int len, u64 gpa, u64 val), | 214 | TP_PROTO(int type, int len, u64 gpa, void *val), |
| 215 | TP_ARGS(type, len, gpa, val), | 215 | TP_ARGS(type, len, gpa, val), |
| 216 | 216 | ||
| 217 | TP_STRUCT__entry( | 217 | TP_STRUCT__entry( |
| @@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio, | |||
| 225 | __entry->type = type; | 225 | __entry->type = type; |
| 226 | __entry->len = len; | 226 | __entry->len = len; |
| 227 | __entry->gpa = gpa; | 227 | __entry->gpa = gpa; |
| 228 | __entry->val = val; | 228 | __entry->val = 0; |
| 229 | if (val) | ||
| 230 | memcpy(&__entry->val, val, | ||
| 231 | min_t(u32, sizeof(__entry->val), len)); | ||
| 229 | ), | 232 | ), |
| 230 | 233 | ||
| 231 | TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", | 234 | TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", |
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index f5024c560d8f..9c4eb33c5a1d 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h | |||
| @@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, | |||
| 56 | 56 | ||
| 57 | #include <trace/define_trace.h> | 57 | #include <trace/define_trace.h> |
| 58 | 58 | ||
| 59 | #else /* !CONFIG_PREEMPTIRQ_EVENTS */ | 59 | #endif /* !CONFIG_PREEMPTIRQ_EVENTS */ |
| 60 | 60 | ||
| 61 | #if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING) | ||
| 61 | #define trace_irq_enable(...) | 62 | #define trace_irq_enable(...) |
| 62 | #define trace_irq_disable(...) | 63 | #define trace_irq_disable(...) |
| 63 | #define trace_preempt_enable(...) | ||
| 64 | #define trace_preempt_disable(...) | ||
| 65 | #define trace_irq_enable_rcuidle(...) | 64 | #define trace_irq_enable_rcuidle(...) |
| 66 | #define trace_irq_disable_rcuidle(...) | 65 | #define trace_irq_disable_rcuidle(...) |
| 66 | #endif | ||
| 67 | |||
| 68 | #if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT) | ||
| 69 | #define trace_preempt_enable(...) | ||
| 70 | #define trace_preempt_disable(...) | ||
| 67 | #define trace_preempt_enable_rcuidle(...) | 71 | #define trace_preempt_enable_rcuidle(...) |
| 68 | #define trace_preempt_disable_rcuidle(...) | 72 | #define trace_preempt_disable_rcuidle(...) |
| 69 | |||
| 70 | #endif | 73 | #endif |
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 07cccca6cbf1..ab34c561f26b 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h | |||
| @@ -25,6 +25,35 @@ | |||
| 25 | tcp_state_name(TCP_CLOSING), \ | 25 | tcp_state_name(TCP_CLOSING), \ |
| 26 | tcp_state_name(TCP_NEW_SYN_RECV)) | 26 | tcp_state_name(TCP_NEW_SYN_RECV)) |
| 27 | 27 | ||
| 28 | #define TP_STORE_V4MAPPED(__entry, saddr, daddr) \ | ||
| 29 | do { \ | ||
| 30 | struct in6_addr *pin6; \ | ||
| 31 | \ | ||
| 32 | pin6 = (struct in6_addr *)__entry->saddr_v6; \ | ||
| 33 | ipv6_addr_set_v4mapped(saddr, pin6); \ | ||
| 34 | pin6 = (struct in6_addr *)__entry->daddr_v6; \ | ||
| 35 | ipv6_addr_set_v4mapped(daddr, pin6); \ | ||
| 36 | } while (0) | ||
| 37 | |||
| 38 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 39 | #define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ | ||
| 40 | do { \ | ||
| 41 | if (sk->sk_family == AF_INET6) { \ | ||
| 42 | struct in6_addr *pin6; \ | ||
| 43 | \ | ||
| 44 | pin6 = (struct in6_addr *)__entry->saddr_v6; \ | ||
| 45 | *pin6 = saddr6; \ | ||
| 46 | pin6 = (struct in6_addr *)__entry->daddr_v6; \ | ||
| 47 | *pin6 = daddr6; \ | ||
| 48 | } else { \ | ||
| 49 | TP_STORE_V4MAPPED(__entry, saddr, daddr); \ | ||
| 50 | } \ | ||
| 51 | } while (0) | ||
| 52 | #else | ||
| 53 | #define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ | ||
| 54 | TP_STORE_V4MAPPED(__entry, saddr, daddr) | ||
| 55 | #endif | ||
| 56 | |||
| 28 | /* | 57 | /* |
| 29 | * tcp event with arguments sk and skb | 58 | * tcp event with arguments sk and skb |
| 30 | * | 59 | * |
| @@ -50,7 +79,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, | |||
| 50 | 79 | ||
| 51 | TP_fast_assign( | 80 | TP_fast_assign( |
| 52 | struct inet_sock *inet = inet_sk(sk); | 81 | struct inet_sock *inet = inet_sk(sk); |
| 53 | struct in6_addr *pin6; | ||
| 54 | __be32 *p32; | 82 | __be32 *p32; |
| 55 | 83 | ||
| 56 | __entry->skbaddr = skb; | 84 | __entry->skbaddr = skb; |
| @@ -65,20 +93,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, | |||
| 65 | p32 = (__be32 *) __entry->daddr; | 93 | p32 = (__be32 *) __entry->daddr; |
| 66 | *p32 = inet->inet_daddr; | 94 | *p32 = inet->inet_daddr; |
| 67 | 95 | ||
| 68 | #if IS_ENABLED(CONFIG_IPV6) | 96 | TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, |
| 69 | if (sk->sk_family == AF_INET6) { | 97 | sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); |
| 70 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 71 | *pin6 = sk->sk_v6_rcv_saddr; | ||
| 72 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 73 | *pin6 = sk->sk_v6_daddr; | ||
| 74 | } else | ||
| 75 | #endif | ||
| 76 | { | ||
| 77 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 78 | ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); | ||
| 79 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 80 | ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); | ||
| 81 | } | ||
| 82 | ), | 98 | ), |
| 83 | 99 | ||
| 84 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", | 100 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", |
| @@ -127,7 +143,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk, | |||
| 127 | 143 | ||
| 128 | TP_fast_assign( | 144 | TP_fast_assign( |
| 129 | struct inet_sock *inet = inet_sk(sk); | 145 | struct inet_sock *inet = inet_sk(sk); |
| 130 | struct in6_addr *pin6; | ||
| 131 | __be32 *p32; | 146 | __be32 *p32; |
| 132 | 147 | ||
| 133 | __entry->skaddr = sk; | 148 | __entry->skaddr = sk; |
| @@ -141,20 +156,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk, | |||
| 141 | p32 = (__be32 *) __entry->daddr; | 156 | p32 = (__be32 *) __entry->daddr; |
| 142 | *p32 = inet->inet_daddr; | 157 | *p32 = inet->inet_daddr; |
| 143 | 158 | ||
| 144 | #if IS_ENABLED(CONFIG_IPV6) | 159 | TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, |
| 145 | if (sk->sk_family == AF_INET6) { | 160 | sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); |
| 146 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 147 | *pin6 = sk->sk_v6_rcv_saddr; | ||
| 148 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 149 | *pin6 = sk->sk_v6_daddr; | ||
| 150 | } else | ||
| 151 | #endif | ||
| 152 | { | ||
| 153 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 154 | ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); | ||
| 155 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 156 | ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); | ||
| 157 | } | ||
| 158 | ), | 161 | ), |
| 159 | 162 | ||
| 160 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", | 163 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", |
| @@ -197,7 +200,6 @@ TRACE_EVENT(tcp_set_state, | |||
| 197 | 200 | ||
| 198 | TP_fast_assign( | 201 | TP_fast_assign( |
| 199 | struct inet_sock *inet = inet_sk(sk); | 202 | struct inet_sock *inet = inet_sk(sk); |
| 200 | struct in6_addr *pin6; | ||
| 201 | __be32 *p32; | 203 | __be32 *p32; |
| 202 | 204 | ||
| 203 | __entry->skaddr = sk; | 205 | __entry->skaddr = sk; |
| @@ -213,20 +215,8 @@ TRACE_EVENT(tcp_set_state, | |||
| 213 | p32 = (__be32 *) __entry->daddr; | 215 | p32 = (__be32 *) __entry->daddr; |
| 214 | *p32 = inet->inet_daddr; | 216 | *p32 = inet->inet_daddr; |
| 215 | 217 | ||
| 216 | #if IS_ENABLED(CONFIG_IPV6) | 218 | TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, |
| 217 | if (sk->sk_family == AF_INET6) { | 219 | sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); |
| 218 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 219 | *pin6 = sk->sk_v6_rcv_saddr; | ||
| 220 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 221 | *pin6 = sk->sk_v6_daddr; | ||
| 222 | } else | ||
| 223 | #endif | ||
| 224 | { | ||
| 225 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 226 | ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); | ||
| 227 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 228 | ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); | ||
| 229 | } | ||
| 230 | ), | 220 | ), |
| 231 | 221 | ||
| 232 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", | 222 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", |
| @@ -256,7 +246,6 @@ TRACE_EVENT(tcp_retransmit_synack, | |||
| 256 | 246 | ||
| 257 | TP_fast_assign( | 247 | TP_fast_assign( |
| 258 | struct inet_request_sock *ireq = inet_rsk(req); | 248 | struct inet_request_sock *ireq = inet_rsk(req); |
| 259 | struct in6_addr *pin6; | ||
| 260 | __be32 *p32; | 249 | __be32 *p32; |
| 261 | 250 | ||
| 262 | __entry->skaddr = sk; | 251 | __entry->skaddr = sk; |
| @@ -271,20 +260,8 @@ TRACE_EVENT(tcp_retransmit_synack, | |||
| 271 | p32 = (__be32 *) __entry->daddr; | 260 | p32 = (__be32 *) __entry->daddr; |
| 272 | *p32 = ireq->ir_rmt_addr; | 261 | *p32 = ireq->ir_rmt_addr; |
| 273 | 262 | ||
| 274 | #if IS_ENABLED(CONFIG_IPV6) | 263 | TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr, |
| 275 | if (sk->sk_family == AF_INET6) { | 264 | ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr); |
| 276 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 277 | *pin6 = ireq->ir_v6_loc_addr; | ||
| 278 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 279 | *pin6 = ireq->ir_v6_rmt_addr; | ||
| 280 | } else | ||
| 281 | #endif | ||
| 282 | { | ||
| 283 | pin6 = (struct in6_addr *)__entry->saddr_v6; | ||
| 284 | ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6); | ||
| 285 | pin6 = (struct in6_addr *)__entry->daddr_v6; | ||
| 286 | ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6); | ||
| 287 | } | ||
| 288 | ), | 265 | ), |
| 289 | 266 | ||
| 290 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", | 267 | TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", |
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index af3cc2f4e1ad..37b5096ae97b 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h | |||
| @@ -256,7 +256,6 @@ struct tc_red_qopt { | |||
| 256 | #define TC_RED_ECN 1 | 256 | #define TC_RED_ECN 1 |
| 257 | #define TC_RED_HARDDROP 2 | 257 | #define TC_RED_HARDDROP 2 |
| 258 | #define TC_RED_ADAPTATIVE 4 | 258 | #define TC_RED_ADAPTATIVE 4 |
| 259 | #define TC_RED_OFFLOADED 8 | ||
| 260 | }; | 259 | }; |
| 261 | 260 | ||
| 262 | struct tc_red_xstats { | 261 | struct tc_red_xstats { |
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index d8b5f80c2ea6..843e29aa3cac 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h | |||
| @@ -557,6 +557,7 @@ enum { | |||
| 557 | TCA_PAD, | 557 | TCA_PAD, |
| 558 | TCA_DUMP_INVISIBLE, | 558 | TCA_DUMP_INVISIBLE, |
| 559 | TCA_CHAIN, | 559 | TCA_CHAIN, |
| 560 | TCA_HW_OFFLOAD, | ||
| 560 | __TCA_MAX | 561 | __TCA_MAX |
| 561 | }; | 562 | }; |
| 562 | 563 | ||
diff --git a/include/xen/balloon.h b/include/xen/balloon.h index 4914b93a23f2..61f410fd74e4 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h | |||
| @@ -44,3 +44,8 @@ static inline void xen_balloon_init(void) | |||
| 44 | { | 44 | { |
| 45 | } | 45 | } |
| 46 | #endif | 46 | #endif |
| 47 | |||
| 48 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG | ||
| 49 | struct resource; | ||
| 50 | void arch_xen_balloon_init(struct resource *hostmem_resource); | ||
| 51 | #endif | ||
