Diffstat (limited to 'include')
89 files changed, 550 insertions, 392 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index db284bff29dc..9dbb739cafa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,7 +5,7 @@
  * Copyright 2001 Red Hat, Inc.
  * Based on code from mm/memory.c Copyright Linus Torvalds and others.
  *
- * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 0b921ae06cd8..0a271ca1f7c7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -309,6 +309,11 @@ struct drm_file {
 	unsigned universal_planes:1;
 	/* true if client understands atomic properties */
 	unsigned atomic:1;
+	/*
+	 * This client is allowed to gain master privileges for @master.
+	 * Protected by struct drm_device::master_mutex.
+	 */
+	unsigned allowed_master:1;
 
 	struct pid *pid;
 	kuid_t uid;
@@ -910,6 +915,7 @@ extern int drm_open(struct inode *inode, struct file *filp);
 extern ssize_t drm_read(struct file *filp, char __user *buffer,
 			size_t count, loff_t *offset);
 extern int drm_release(struct inode *inode, struct file *filp);
+extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
 
 /* Mapping support (drm_vm.h) */
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -947,6 +953,10 @@ extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
 				  struct drm_pending_vblank_event *e);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
 				       struct drm_pending_vblank_event *e);
+extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+				 struct drm_pending_vblank_event *e);
+extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+				      struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
 extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
 extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index e67aeac2aee0..4b74c97d297a 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -136,6 +136,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
 
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
+void
+drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
+
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
 int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 9c747cb14ad8..d2f41477f8ae 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
 			       struct irq_phys_map *map, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
 					   int virt_irq, int irq);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 054833939995..1991aea2ec4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
 }
 
 static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
-				const char *name, const char *cells_name,
-				size_t index, struct acpi_reference_args *args)
+				const char *name, size_t index,
+				struct acpi_reference_args *args)
 {
 	return -ENXIO;
 }
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2b8ed123ad36..defeaac0745f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
  */
 static inline __u32 rol32(__u32 word, unsigned int shift)
 {
-	return (word << shift) | (word >> (32 - shift));
+	return (word << shift) | (word >> ((-shift) & 31));
 }
 
 /**
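
The rewritten rotate avoids the old expression's undefined right shift of a 32-bit word by 32 when shift is 0, since (-shift) & 31 maps 0 to 0 rather than 32. A minimal userspace sketch of the same expression (not part of the patch; rol32_sketch() and the test values are illustrative):

#include <assert.h>
#include <stdint.h>

/* Sketch only: mirrors the new rol32() expression outside the kernel.
 * For shift == 0, the old form computed word >> 32, which is undefined
 * for a 32-bit operand; (-shift) & 31 yields 0, so word >> 0 is used.
 */
static uint32_t rol32_sketch(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
	assert(rol32_sketch(0x80000001u, 0) == 0x80000001u); /* no-op rotate */
	assert(rol32_sketch(0x80000001u, 1) == 0x00000003u); /* top bit wraps */
	return 0;
}
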
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3fe27f8d91f0..c70e3588a48c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -254,6 +254,7 @@ struct queue_limits {
 	unsigned long		virt_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		max_dev_sectors;
 	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 			     struct bio_set *bs, gfp_t gfp_mask,
@@ -794,7 +794,10 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
+extern void blk_start_queue_async(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
@@ -958,7 +961,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index de464e6683b6..83d1926c61e4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -40,6 +40,7 @@ struct bpf_map {
 	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
+	atomic_t usercnt;
 };
 
 struct bpf_map_type_list {
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
-struct bpf_map *bpf_map_get(u32 ufd);
+struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
+void bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 
 extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 60d44b26276d..06b77f9dd3f2 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -90,7 +90,6 @@ enum {
  */
 struct cgroup_file {
 	/* do not access any fields from outside cgroup core */
-	struct list_head node;		/* anchored at css->files */
 	struct kernfs_node *kn;
 };
 
@@ -134,9 +133,6 @@ struct cgroup_subsys_state {
 	 */
 	u64 serial_nr;
 
-	/* all cgroup_files associated with this css */
-	struct list_head files;
-
 	/* percpu_ref killing and RCU release */
 	struct rcu_head rcu_head;
 	struct work_struct destroy_work;
@@ -426,12 +422,9 @@ struct cgroup_subsys {
 	void (*css_reset)(struct cgroup_subsys_state *css);
 	void (*css_e_css_changed)(struct cgroup_subsys_state *css);
 
-	int (*can_attach)(struct cgroup_subsys_state *css,
-			  struct cgroup_taskset *tset);
-	void (*cancel_attach)(struct cgroup_subsys_state *css,
-			      struct cgroup_taskset *tset);
-	void (*attach)(struct cgroup_subsys_state *css,
-		       struct cgroup_taskset *tset);
+	int (*can_attach)(struct cgroup_taskset *tset);
+	void (*cancel_attach)(struct cgroup_taskset *tset);
+	void (*attach)(struct cgroup_taskset *tset);
 	int (*can_fork)(struct task_struct *task, void **priv_p);
 	void (*cancel_fork)(struct task_struct *task, void *priv);
 	void (*fork)(struct task_struct *task, void *priv);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 22e3754f89c5..cb91b44f5f78 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -88,6 +88,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
+void cgroup_file_notify(struct cgroup_file *cfile);
 
 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -119,8 +120,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
 struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
 						     struct cgroup_subsys_state *css);
 
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+					 struct cgroup_subsys_state **dst_cssp);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+					struct cgroup_subsys_state **dst_cssp);
 
 void css_task_iter_start(struct cgroup_subsys_state *css,
 			 struct css_task_iter *it);
@@ -235,30 +238,39 @@ void css_task_iter_end(struct css_task_iter *it);
 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
+ * @dst_css: the destination css
  * @tset: taskset to iterate
  *
  * @tset may contain multiple tasks and they may belong to multiple
- * processes. When there are multiple tasks in @tset, if a task of a
- * process is in @tset, all tasks of the process are in @tset. Also, all
- * are guaranteed to share the same source and destination csses.
+ * processes.
+ *
+ * On the v2 hierarchy, there may be tasks from multiple processes and they
+ * may not share the source or destination csses.
+ *
+ * On traditional hierarchies, when there are multiple tasks in @tset, if a
+ * task of a process is in @tset, all tasks of the process are in @tset.
+ * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
-#define cgroup_taskset_for_each(task, tset)			\
-	for ((task) = cgroup_taskset_first((tset)); (task);	\
-	     (task) = cgroup_taskset_next((tset)))
+#define cgroup_taskset_for_each(task, dst_css, tset)		\
+	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
+	     (task);						\
+	     (task) = cgroup_taskset_next((tset), &(dst_css)))
 
 /**
  * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
  * @leader: the loop cursor
+ * @dst_css: the destination css
  * @tset: takset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
-#define cgroup_taskset_for_each_leader(leader, tset)		\
-	for ((leader) = cgroup_taskset_first((tset)); (leader);	\
-	     (leader) = cgroup_taskset_next((tset)))		\
+#define cgroup_taskset_for_each_leader(leader, dst_css, tset)	\
+	for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
+	     (leader);						\
+	     (leader) = cgroup_taskset_next((tset), &(dst_css))) \
 		if ((leader) != (leader)->group_leader)		\
 			;					\
 		else
@@ -516,19 +528,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
-/**
- * cgroup_file_notify - generate a file modified event for a cgroup_file
- * @cfile: target cgroup_file
- *
- * @cfile must have been obtained by setting cftype->file_offset.
- */
-static inline void cgroup_file_notify(struct cgroup_file *cfile)
-{
-	/* might not have been created due to one of the CFTYPE selector flags */
-	if (cfile->kn)
-		kernfs_notify(cfile->kn);
-}
-
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
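
A hedged sketch of how a controller callback would iterate a taskset after the signature change above; example_attach() and example_apply() are illustrative and not part of this commit:

/* Illustrative only: a controller's ->attach() after the change above.
 * cgroup_taskset_for_each() now also hands back the destination css,
 * which on the v2 hierarchy may differ from task to task.
 */
static void example_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *new_css;

	cgroup_taskset_for_each(task, new_css, tset) {
		/* apply per-task state for the css the task moves into;
		 * example_apply() stands in for real controller work */
		example_apply(task, new_css);
	}
}
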
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a8a335b7fce0..758a029011b1 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
 int configfs_register_subsystem(struct configfs_subsystem *subsys);
 void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
 
+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
 /* These functions can sleep and can alloc with GFP_KERNEL */
 /* WARNING: These cannot be called underneath configfs callbacks!! */
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
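
A hedged usage sketch of the new default-group helpers; example_init(), example_exit() and example_item_type are illustrative, and the sketch assumes the usual configfs convention of returning an ERR_PTR-encoded pointer on failure:

/* Illustrative only: register a default child group under a parent group
 * using the helpers declared above, and tear it down again on exit.
 * example_item_type is assumed to be a struct config_item_type defined
 * elsewhere by the caller.
 */
static struct config_group *example_group;

static int example_init(struct config_group *parent)
{
	example_group = configfs_register_default_group(parent, "example",
							&example_item_type);
	if (IS_ERR(example_group))
		return PTR_ERR(example_group);
	return 0;
}

static void example_exit(void)
{
	configfs_unregister_default_group(example_group);
}
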
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ef4c5b1a860f..177c7680c1a8 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -77,6 +77,7 @@ struct cpufreq_policy {
 	unsigned int		suspend_freq; /* freq to set during suspend */
 
 	unsigned int		policy; /* see above */
+	unsigned int		last_policy; /* policy before unplug */
 	struct cpufreq_governor	*governor; /* see below */
 	void			*governor_data;
 	bool			governor_enabled; /* governor start/stop flag */
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index cc92268af89a..6ac3cad9aef1 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -27,7 +27,7 @@
 #ifdef __KERNEL__
 
 extern int dns_query(const char *type, const char *name, size_t namelen,
-		     const char *options, char **_result, time_t *_expiry);
+		     const char *options, char **_result, time64_t *_expiry);
 
 #endif /* KERNEL */
 
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 7be22da321f3..a4cf57cd0f75 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -29,7 +29,11 @@
 /* A few generic types ... taken from ses-2 */
 enum enclosure_component_type {
 	ENCLOSURE_COMPONENT_DEVICE = 0x01,
+	ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
+	ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
+	ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
 	ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
+	ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
 };
 
 /* ses-2 common element status */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6523109e136d..8942af0813e3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 
 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
-	return gfp_flags & __GFP_DIRECT_RECLAIM;
+	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
 #ifdef CONFIG_HIGHMEM
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0ef2a97ccdb5..402753bccafa 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -227,7 +227,7 @@ struct ipv6_pinfo {
 	struct ipv6_ac_socklist	*ipv6_ac_list;
 	struct ipv6_fl_socklist __rcu *ipv6_fl_list;
 
-	struct ipv6_txoptions	*opt;
+	struct ipv6_txoptions __rcu	*opt;
 	struct sk_buff		*pktoptions;
 	struct sk_buff		*rxpmtu;
 	struct inet6_cork	cork;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c9ae0c6ec050..d5d798b35c1f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -330,6 +330,7 @@ struct rdists {
 };
 
 struct irq_domain;
+struct device_node;
 int its_cpu_init(void);
 int its_init(struct device_node *node, struct rdists *rdists,
 	     struct irq_domain *domain);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 8dde55974f18..0536524bb9eb 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -5,7 +5,7 @@
  * Jump label support
  *
  * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index d0a1f99e24e3..4894c6888bc6 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -25,7 +25,7 @@
 
 #ifdef CONFIG_DEBUG_KMEMLEAK
 
-extern void kmemleak_init(void) __ref;
+extern void kmemleak_init(void) __init;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
 			   gfp_t gfp) __ref;
 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 484604d184be..e15828fd71f1 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,7 +19,6 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>
 
 struct kref {
 	atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
 	return kref_sub(kref, 1, release);
 }
 
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception. If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count. The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
-		void (*release)(struct kref *kref),
-		spinlock_t *lock)
-{
-	unsigned long flags;
-
-	WARN_ON(release == NULL);
-	if (atomic_add_unless(&kref->refcount, -1, 1))
-		return 0;
-	spin_lock_irqsave(lock, flags);
-	if (atomic_dec_and_test(&kref->refcount)) {
-		release(kref);
-		local_irq_restore(flags);
-		return 1;
-	}
-	spin_unlock_irqrestore(lock, flags);
-	return 0;
-}
-
 static inline int kref_put_mutex(struct kref *kref,
 				 void (*release)(struct kref *kref),
 				 struct mutex *lock)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5706a2108f0a..c923350ca20a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
 	     idx++)
 
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		if (vcpu->vcpu_id == id)
+			return vcpu;
+	return NULL;
+}
+
 #define kvm_for_each_memslot(memslot, slots)	\
 	for (memslot = &slots->memslots[0];	\
 	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
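
A brief, hedged usage sketch for the new lookup helper; the surrounding function and error code are illustrative, not taken from this commit:

/* Illustrative only: resolve a vcpu_id coming from userspace to a vcpu,
 * as an ioctl handler might do with the helper added above. The helper
 * returns NULL when no vcpu carries the requested id.
 */
static int example_find_vcpu(struct kvm *kvm, int vcpu_id)
{
	struct kvm_vcpu *vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);

	if (!vcpu)
		return -ENOENT;
	/* ... operate on vcpu ... */
	return 0;
}
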
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83577f8fd15b..600c1e0626a5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
 					    /* (doesn't imply presence) */
 	ATA_FLAG_SATA		= (1 << 1),
+	ATA_FLAG_NO_LOG_PAGE	= (1 << 5), /* do not issue log page read */
 	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
 	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
 	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 69c9057e1ab8..034117b3be5f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,15 +50,21 @@ enum {
 	NVM_IO_DUAL_ACCESS	= 0x1,
 	NVM_IO_QUAD_ACCESS	= 0x2,
 
+	/* NAND Access Modes */
 	NVM_IO_SUSPEND		= 0x80,
 	NVM_IO_SLC_MODE		= 0x100,
 	NVM_IO_SCRAMBLE_DISABLE	= 0x200,
+
+	/* Block Types */
+	NVM_BLK_T_FREE		= 0x0,
+	NVM_BLK_T_BAD		= 0x1,
+	NVM_BLK_T_DEV		= 0x2,
+	NVM_BLK_T_HOST		= 0x4,
 };
 
 struct nvm_id_group {
 	u8	mtype;
 	u8	fmtype;
-	u16	res16;
 	u8	num_ch;
 	u8	num_lun;
 	u8	num_pln;
@@ -74,9 +80,9 @@ struct nvm_id_group {
 	u32	tbet;
 	u32	tbem;
 	u32	mpos;
+	u32	mccap;
 	u16	cpar;
-	u8	res[913];
-} __packed;
+};
 
 struct nvm_addr_format {
 	u8	ch_offset;
@@ -91,19 +97,15 @@ struct nvm_addr_format {
 	u8	pg_len;
 	u8	sect_offset;
 	u8	sect_len;
-	u8	res[4];
 };
 
 struct nvm_id {
 	u8	ver_id;
 	u8	vmnt;
 	u8	cgrps;
-	u8	res[5];
 	u32	cap;
 	u32	dom;
 	struct nvm_addr_format ppaf;
-	u8	ppat;
-	u8	resv[224];
 	struct nvm_id_group groups[4];
 } __packed;
 
@@ -123,39 +125,28 @@ struct nvm_tgt_instance {
 #define NVM_VERSION_MINOR 0
 #define NVM_VERSION_PATCH 0
 
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (6)
-#define NVM_PG_BITS (16)
 #define NVM_BLK_BITS (16)
-#define NVM_LUN_BITS (10)
+#define NVM_PG_BITS (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS (8)
+#define NVM_LUN_BITS (8)
 #define NVM_CH_BITS (8)
 
 struct ppa_addr {
+	/* Generic structure for all addresses */
 	union {
-		/* Channel-based PPA format in nand 4x2x2x2x8x10 */
-		struct {
-			u64 ch		: 4;
-			u64 sec		: 2; /* 4 sectors per page */
-			u64 pl		: 2; /* 4 planes per LUN */
-			u64 lun		: 2; /* 4 LUNs per channel */
-			u64 pg		: 8; /* 256 pages per block */
-			u64 blk		: 10;/* 1024 blocks per plane */
-			u64 resved	: 36;
-		} chnl;
-
-		/* Generic structure for all addresses */
 		struct {
+			u64 blk		: NVM_BLK_BITS;
+			u64 pg		: NVM_PG_BITS;
 			u64 sec		: NVM_SEC_BITS;
 			u64 pl		: NVM_PL_BITS;
-			u64 pg		: NVM_PG_BITS;
-			u64 blk		: NVM_BLK_BITS;
 			u64 lun		: NVM_LUN_BITS;
 			u64 ch		: NVM_CH_BITS;
 		} g;
 
 		u64 ppa;
 	};
-} __packed;
+};
 
 struct nvm_rq {
 	struct nvm_tgt_instance *ins;
@@ -191,18 +182,18 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
 struct nvm_block;
 
 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
-typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
+typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
+typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
 				nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
 				nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
-typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
 typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
+typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
 				dma_addr_t *);
 typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
 
@@ -210,7 +201,7 @@ struct nvm_dev_ops {
 	nvm_id_fn		*identity;
 	nvm_get_l2p_tbl_fn	*get_l2p_tbl;
 	nvm_op_bb_tbl_fn	*get_bb_tbl;
-	nvm_op_set_bb_fn	*set_bb;
+	nvm_op_set_bb_fn	*set_bb_tbl;
 
 	nvm_submit_io_fn	*submit_io;
 	nvm_erase_blk_fn	*erase_block;
@@ -220,7 +211,7 @@ struct nvm_dev_ops {
 	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
 	nvm_dev_dma_free_fn	*dev_dma_free;
 
-	uint8_t			max_phys_sect;
+	unsigned int		max_phys_sect;
 };
 
 struct nvm_lun {
@@ -229,7 +220,9 @@ struct nvm_lun {
 	int lun_id;
 	int chnl_id;
 
+	unsigned int nr_inuse_blocks;	/* Number of used blocks */
 	unsigned int nr_free_blocks;	/* Number of unused blocks */
+	unsigned int nr_bad_blocks;	/* Number of bad blocks */
 	struct nvm_block *blocks;
 
 	spinlock_t lock;
@@ -263,8 +256,7 @@ struct nvm_dev {
 	int blks_per_lun;
 	int sec_size;
 	int oob_size;
-	int addr_mode;
-	struct nvm_addr_format addr_format;
+	struct nvm_addr_format ppaf;
 
 	/* Calculated/Cached values. These do not reflect the actual usable
 	 * blocks at run-time.
@@ -290,118 +282,45 @@ struct nvm_dev {
 	char name[DISK_NAME_LEN];
 };
 
-/* fallback conversion */
-static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
-						struct ppa_addr r)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+						struct ppa_addr r)
 {
 	struct ppa_addr l;
 
-	l.ppa = r.g.sec +
-		r.g.pg * dev->sec_per_pg +
-		r.g.blk * (dev->pgs_per_blk *
-				dev->sec_per_pg) +
-		r.g.lun * (dev->blks_per_lun *
-				dev->pgs_per_blk *
-				dev->sec_per_pg) +
-		r.g.ch * (dev->blks_per_lun *
-				dev->pgs_per_blk *
-				dev->luns_per_chnl *
-				dev->sec_per_pg);
+	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
+	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
+	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
+	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
+	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
+	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
 
 	return l;
 }
 
-/* fallback conversion */
-static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
-						struct ppa_addr r)
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
+						struct ppa_addr r)
 {
 	struct ppa_addr l;
-	int secs, pgs, blks, luns;
-	sector_t ppa = r.ppa;
-
-	l.ppa = 0;
-
-	div_u64_rem(ppa, dev->sec_per_pg, &secs);
-	l.g.sec = secs;
 
-	sector_div(ppa, dev->sec_per_pg);
-	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
-	l.g.pg = pgs;
-
-	sector_div(ppa, dev->pgs_per_blk);
-	div_u64_rem(ppa, dev->blks_per_lun, &blks);
-	l.g.blk = blks;
-
-	sector_div(ppa, dev->blks_per_lun);
-	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
-	l.g.lun = luns;
-
-	sector_div(ppa, dev->luns_per_chnl);
-	l.g.ch = ppa;
-
-	return l;
-}
-
-static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
-{
-	struct ppa_addr l;
-
-	l.ppa = 0;
-
-	l.chnl.sec = r.g.sec;
-	l.chnl.pl = r.g.pl;
-	l.chnl.pg = r.g.pg;
-	l.chnl.blk = r.g.blk;
-	l.chnl.lun = r.g.lun;
-	l.chnl.ch = r.g.ch;
-
-	return l;
-}
-
-static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
-{
-	struct ppa_addr l;
-
-	l.ppa = 0;
-
-	l.g.sec = r.chnl.sec;
-	l.g.pl = r.chnl.pl;
-	l.g.pg = r.chnl.pg;
-	l.g.blk = r.chnl.blk;
-	l.g.lun = r.chnl.lun;
-	l.g.ch = r.chnl.ch;
+	/*
+	 * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
+	 */
+	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
+					(((1 << dev->ppaf.blk_len) - 1));
+	l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
+					(((1 << dev->ppaf.pg_len) - 1));
+	l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
+					(((1 << dev->ppaf.sect_len) - 1));
+	l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
+					(((1 << dev->ppaf.pln_len) - 1));
+	l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
+					(((1 << dev->ppaf.lun_len) - 1));
+	l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
+					(((1 << dev->ppaf.ch_len) - 1));
 
 	return l;
 }
 
-static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
-						struct ppa_addr gppa)
-{
-	switch (dev->addr_mode) {
-	case NVM_ADDRMODE_LINEAR:
-		return __linear_to_generic_addr(dev, gppa);
-	case NVM_ADDRMODE_CHANNEL:
-		return __chnl_to_generic_addr(gppa);
-	default:
-		BUG();
-	}
-	return gppa;
-}
-
-static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
-						struct ppa_addr gppa)
-{
-	switch (dev->addr_mode) {
-	case NVM_ADDRMODE_LINEAR:
-		return __generic_to_linear_addr(dev, gppa);
-	case NVM_ADDRMODE_CHANNEL:
-		return __generic_to_chnl_addr(gppa);
-	default:
-		BUG();
-	}
-	return gppa;
-}
-
 static inline int ppa_empty(struct ppa_addr ppa_addr)
 {
 	return (ppa_addr.ppa == ADDR_EMPTY);
@@ -468,7 +387,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 								unsigned long);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
+typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
 struct nvmm_type {
 	const char *name;
@@ -492,7 +411,7 @@ struct nvmm_type {
 	nvmm_get_lun_fn *get_lun;
 
 	/* Statistics */
-	nvmm_free_blocks_print_fn *free_blocks_print;
+	nvmm_lun_info_print_fn *lun_info_print;
 	struct list_head list;
 };
 
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 70400dc7660f..c57e424d914b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -2,7 +2,7 @@
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index e6982ac3200d..a57f0dfb6db7 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
 #define MARVELL_PHY_ID_88E1318S		0x01410e90
 #define MARVELL_PHY_ID_88E1116R		0x01410e40
 #define MARVELL_PHY_ID_88E1510		0x01410dd0
+#define MARVELL_PHY_ID_88E1540		0x01410eb0
 #define MARVELL_PHY_ID_88E3016		0x01410e60
 
 /* struct phy_device dev_flags definitions */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7501626ab529..d3133be12d92 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -427,6 +427,17 @@ enum {
 };
 
 enum {
+	/*
+	 * Max wqe size for rdma read is 512 bytes, so this
+	 * limits our max_sge_rd as the wqe needs to fit:
+	 * - ctrl segment (16 bytes)
+	 * - rdma segment (16 bytes)
+	 * - scatter elements (16 bytes each)
+	 */
+	MLX4_MAX_SGE_RD	= (512 - 16 - 16) / 16
+};
+
+enum {
 	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	 = 0x14,
 	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	 = 0x15,
 	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	 = 0x16,
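
For reference, the budget spelled out in the comment works out to (512 - 16 - 16) / 16 = 30, so MLX4_MAX_SGE_RD evaluates to 30 scatter entries per RDMA read work request.
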
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dd2097455a2e..1565324eb620 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         lro_cap[0x1];
 	u8         lro_psh_flag[0x1];
 	u8         lro_time_stamp[0x1];
-	u8         reserved_0[0x6];
+	u8         reserved_0[0x3];
+	u8         self_lb_en_modifiable[0x1];
+	u8         reserved_1[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_1[0x4];
+	u8         reserved_2[0x4];
 	u8         rss_ind_tbl_cap[0x4];
-	u8         reserved_2[0x3];
+	u8         reserved_3[0x3];
 	u8         tunnel_lso_const_out_ip_id[0x1];
-	u8         reserved_3[0x2];
+	u8         reserved_4[0x2];
 	u8         tunnel_statless_gre[0x1];
 	u8         tunnel_stateless_vxlan[0x1];
 
-	u8         reserved_4[0x20];
+	u8         reserved_5[0x20];
 
-	u8         reserved_5[0x10];
+	u8         reserved_6[0x10];
 	u8         lro_min_mss_size[0x10];
 
-	u8         reserved_6[0x120];
+	u8         reserved_7[0x120];
 
 	u8         lro_timer_supported_periods[4][0x20];
 
-	u8         reserved_7[0x600];
+	u8         reserved_8[0x600];
 };
 
 struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
 };
 
 struct mlx5_ifc_modify_tir_bitmask_bits {
-	u8         reserved[0x20];
+	u8         reserved_0[0x20];
 
-	u8         reserved1[0x1f];
+	u8         reserved_1[0x1b];
+	u8         self_lb_en[0x1];
+	u8         reserved_2[0x3];
 	u8         lro[0x1];
 };
 
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 877ef226f90f..772362adf471 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,6 +1,7 @@
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1
 
+#include <linux/bug.h>
 #include <linux/stringify.h>
 
 struct page;
diff --git a/include/linux/net.h b/include/linux/net.h
index 70ac5e28e6b7..0b4ac7da583a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -34,8 +34,12 @@ struct inode;
 struct file;
 struct net;
 
-#define SOCK_ASYNC_NOSPACE	0
-#define SOCK_ASYNC_WAITDATA	1
+/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
+ * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
+ * Eventually all flags will be in sk->sk_wq_flags.
+ */
+#define SOCKWQ_ASYNC_NOSPACE	0
+#define SOCKWQ_ASYNC_WAITDATA	1
 #define SOCK_NOSPACE		2
 #define SOCK_PASSCRED		3
 #define SOCK_PASSSEC		4
@@ -89,6 +93,7 @@ struct socket_wq {
 	/* Note: wait MUST be first field of socket_wq */
 	wait_queue_head_t	wait;
 	struct fasync_struct	*fasync_list;
+	unsigned long		flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
 	struct rcu_head		rcu;
 } ____cacheline_aligned_in_smp;
 
@@ -96,7 +101,7 @@ struct socket_wq {
 *  struct socket - general BSD socket
 *  @state: socket state (%SS_CONNECTED, etc)
 *  @type: socket type (%SOCK_STREAM, etc)
- *  @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
+ *  @flags: socket flags (%SOCK_NOSPACE, etc)
 *  @ops: protocol specific socket operations
 *  @file: File back pointer for gc
 *  @sk: internal networking protocol agnostic socket representation
@@ -202,7 +207,7 @@ enum {
 	SOCK_WAKE_URG,
 };
 
-int sock_wake_async(struct socket *sk, int how, int band);
+int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
 int sock_register(const struct net_proto_family *fam);
 void sock_unregister(int family);
 int __sock_create(struct net *net, int family, int type, int proto,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d20891465247..3143c847bddb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -1398,7 +1398,8 @@ enum netdev_priv_flags { | |||
| 1398 | * @dma: DMA channel | 1398 | * @dma: DMA channel |
| 1399 | * @mtu: Interface MTU value | 1399 | * @mtu: Interface MTU value |
| 1400 | * @type: Interface hardware type | 1400 | * @type: Interface hardware type |
| 1401 | * @hard_header_len: Hardware header length | 1401 | * @hard_header_len: Hardware header length, which means that this is the |
| 1402 | * minimum size of a packet. | ||
| 1402 | * | 1403 | * |
| 1403 | * @needed_headroom: Extra headroom the hardware may need, but not in all | 1404 | * @needed_headroom: Extra headroom the hardware may need, but not in all |
| 1404 | * cases can this be guaranteed | 1405 | * cases can this be guaranteed |
| @@ -2068,20 +2069,23 @@ struct pcpu_sw_netstats { | |||
| 2068 | struct u64_stats_sync syncp; | 2069 | struct u64_stats_sync syncp; |
| 2069 | }; | 2070 | }; |
| 2070 | 2071 | ||
| 2071 | #define netdev_alloc_pcpu_stats(type) \ | 2072 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
| 2072 | ({ \ | 2073 | ({ \ |
| 2073 | typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ | 2074 | typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ |
| 2074 | if (pcpu_stats) { \ | 2075 | if (pcpu_stats) { \ |
| 2075 | int __cpu; \ | 2076 | int __cpu; \ |
| 2076 | for_each_possible_cpu(__cpu) { \ | 2077 | for_each_possible_cpu(__cpu) { \ |
| 2077 | typeof(type) *stat; \ | 2078 | typeof(type) *stat; \ |
| 2078 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ | 2079 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ |
| 2079 | u64_stats_init(&stat->syncp); \ | 2080 | u64_stats_init(&stat->syncp); \ |
| 2080 | } \ | 2081 | } \ |
| 2081 | } \ | 2082 | } \ |
| 2082 | pcpu_stats; \ | 2083 | pcpu_stats; \ |
| 2083 | }) | 2084 | }) |
| 2084 | 2085 | ||
| 2086 | #define netdev_alloc_pcpu_stats(type) \ | ||
| 2087 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL) | ||
| 2088 | |||
| 2085 | #include <linux/notifier.h> | 2089 | #include <linux/notifier.h> |
| 2086 | 2090 | ||
| 2087 | /* netdevice notifier chain. Please remember to update the rtnetlink | 2091 | /* netdevice notifier chain. Please remember to update the rtnetlink |
| @@ -3854,6 +3858,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev) | |||
| 3854 | return dev->priv_flags & IFF_EBRIDGE; | 3858 | return dev->priv_flags & IFF_EBRIDGE; |
| 3855 | } | 3859 | } |
| 3856 | 3860 | ||
| 3861 | static inline bool netif_is_bridge_port(const struct net_device *dev) | ||
| 3862 | { | ||
| 3863 | return dev->priv_flags & IFF_BRIDGE_PORT; | ||
| 3864 | } | ||
| 3865 | |||
| 3857 | static inline bool netif_is_ovs_master(const struct net_device *dev) | 3866 | static inline bool netif_is_ovs_master(const struct net_device *dev) |
| 3858 | { | 3867 | { |
| 3859 | return dev->priv_flags & IFF_OPENVSWITCH; | 3868 | return dev->priv_flags & IFF_OPENVSWITCH; |
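The gfp-aware split of the per-CPU stats allocator can be exercised as below; a sketch only, with the driver private structure and the GFP_ATOMIC context assumed. netdev_alloc_pcpu_stats() remains the GFP_KERNEL shorthand.

#include <linux/netdevice.h>

struct sketch_priv {
	struct pcpu_sw_netstats __percpu *stats;
};

/* Sketch: allocate per-CPU sw stats from a context that cannot sleep. */
static int sketch_init_stats(struct net_device *dev)
{
	struct sketch_priv *p = netdev_priv(dev);

	p->stats = __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats,
					     GFP_ATOMIC);
	return p->stats ? 0 : -ENOMEM;
}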
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 48bb01edcf30..0e1f433cc4b7 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
| @@ -421,7 +421,7 @@ extern void ip_set_free(void *members); | |||
| 421 | extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); | 421 | extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); |
| 422 | extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); | 422 | extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); |
| 423 | extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], | 423 | extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], |
| 424 | size_t len); | 424 | size_t len, size_t align); |
| 425 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], | 425 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], |
| 426 | struct ip_set_ext *ext); | 426 | struct ip_set_ext *ext); |
| 427 | 427 | ||
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 249d1bb01e03..5646b24bfc64 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h | |||
| @@ -14,7 +14,7 @@ struct nfnl_callback { | |||
| 14 | int (*call_rcu)(struct sock *nl, struct sk_buff *skb, | 14 | int (*call_rcu)(struct sock *nl, struct sk_buff *skb, |
| 15 | const struct nlmsghdr *nlh, | 15 | const struct nlmsghdr *nlh, |
| 16 | const struct nlattr * const cda[]); | 16 | const struct nlattr * const cda[]); |
| 17 | int (*call_batch)(struct sock *nl, struct sk_buff *skb, | 17 | int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, |
| 18 | const struct nlmsghdr *nlh, | 18 | const struct nlmsghdr *nlh, |
| 19 | const struct nlattr * const cda[]); | 19 | const struct nlattr * const cda[]); |
| 20 | const struct nla_policy *policy; /* netlink attribute policy */ | 20 | const struct nla_policy *policy; /* netlink attribute policy */ |
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index 187feabe557c..5fcd375ef175 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h | |||
| @@ -5,10 +5,13 @@ | |||
| 5 | #include <linux/netdevice.h> | 5 | #include <linux/netdevice.h> |
| 6 | 6 | ||
| 7 | #ifdef CONFIG_NETFILTER_INGRESS | 7 | #ifdef CONFIG_NETFILTER_INGRESS |
| 8 | static inline int nf_hook_ingress_active(struct sk_buff *skb) | 8 | static inline bool nf_hook_ingress_active(const struct sk_buff *skb) |
| 9 | { | 9 | { |
| 10 | return nf_hook_list_active(&skb->dev->nf_hooks_ingress, | 10 | #ifdef HAVE_JUMP_LABEL |
| 11 | NFPROTO_NETDEV, NF_NETDEV_INGRESS); | 11 | if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) |
| 12 | return false; | ||
| 13 | #endif | ||
| 14 | return !list_empty(&skb->dev->nf_hooks_ingress); | ||
| 12 | } | 15 | } |
| 13 | 16 | ||
| 14 | static inline int nf_hook_ingress(struct sk_buff *skb) | 17 | static inline int nf_hook_ingress(struct sk_buff *skb) |
| @@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb) | |||
| 16 | struct nf_hook_state state; | 19 | struct nf_hook_state state; |
| 17 | 20 | ||
| 18 | nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, | 21 | nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, |
| 19 | NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL, | 22 | NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, |
| 20 | skb->dev, NULL, dev_net(skb->dev), NULL); | 23 | skb->dev, NULL, NULL, dev_net(skb->dev), NULL); |
| 21 | return nf_hook_slow(skb, &state); | 24 | return nf_hook_slow(skb, &state); |
| 22 | } | 25 | } |
| 23 | 26 | ||
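A sketch of the intended call-site shape (the wrapper name and return handling are assumptions; nf_hook_ingress() keeps the nf_hook_slow() return convention):

#include <linux/netfilter_ingress.h>

static int sketch_handle_ingress(struct sk_buff *skb)
{
	/* Static-key fast path: cheap when no ingress hooks are registered. */
	if (!nf_hook_ingress_active(skb))
		return 1;
	return nf_hook_ingress(skb);
}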
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 570d630f98ae..11bbae44f4cb 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -251,6 +251,7 @@ struct nfs4_layoutget { | |||
| 251 | struct nfs4_layoutget_res res; | 251 | struct nfs4_layoutget_res res; |
| 252 | struct rpc_cred *cred; | 252 | struct rpc_cred *cred; |
| 253 | gfp_t gfp_flags; | 253 | gfp_t gfp_flags; |
| 254 | long timeout; | ||
| 254 | }; | 255 | }; |
| 255 | 256 | ||
| 256 | struct nfs4_getdeviceinfo_args { | 257 | struct nfs4_getdeviceinfo_args { |
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 36112cdd665a..b90d8ec57c1f 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
| @@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np, | |||
| 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
| 81 | const char *name) | 81 | const char *name) |
| 82 | { | 82 | { |
| 83 | return NULL; | 83 | return ERR_PTR(-ENODEV); |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | 86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, |
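With the !CONFIG_OF_DMA stub now returning ERR_PTR(-ENODEV) instead of NULL, a caller can use a single IS_ERR() path either way; a sketch, where the "rx" channel name is illustrative only:

#include <linux/err.h>
#include <linux/of_dma.h>

static struct dma_chan *sketch_request_rx(struct device_node *np)
{
	struct dma_chan *chan = of_dma_request_slave_channel(np, "rx");

	if (IS_ERR(chan))
		return NULL;	/* or propagate PTR_ERR(chan) */
	return chan;
}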
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 039f2eec49ce..1e0deb8e8494 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h | |||
| @@ -46,12 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index); | |||
| 46 | extern int of_irq_get_byname(struct device_node *dev, const char *name); | 46 | extern int of_irq_get_byname(struct device_node *dev, const char *name); |
| 47 | extern int of_irq_to_resource_table(struct device_node *dev, | 47 | extern int of_irq_to_resource_table(struct device_node *dev, |
| 48 | struct resource *res, int nr_irqs); | 48 | struct resource *res, int nr_irqs); |
| 49 | extern struct device_node *of_irq_find_parent(struct device_node *child); | ||
| 49 | extern struct irq_domain *of_msi_get_domain(struct device *dev, | 50 | extern struct irq_domain *of_msi_get_domain(struct device *dev, |
| 50 | struct device_node *np, | 51 | struct device_node *np, |
| 51 | enum irq_domain_bus_token token); | 52 | enum irq_domain_bus_token token); |
| 52 | extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, | 53 | extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, |
| 53 | u32 rid); | 54 | u32 rid); |
| 54 | extern void of_msi_configure(struct device *dev, struct device_node *np); | 55 | extern void of_msi_configure(struct device *dev, struct device_node *np); |
| 56 | u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); | ||
| 55 | #else | 57 | #else |
| 56 | static inline int of_irq_count(struct device_node *dev) | 58 | static inline int of_irq_count(struct device_node *dev) |
| 57 | { | 59 | { |
| @@ -70,6 +72,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev, | |||
| 70 | { | 72 | { |
| 71 | return 0; | 73 | return 0; |
| 72 | } | 74 | } |
| 75 | static inline void *of_irq_find_parent(struct device_node *child) | ||
| 76 | { | ||
| 77 | return NULL; | ||
| 78 | } | ||
| 79 | |||
| 73 | static inline struct irq_domain *of_msi_get_domain(struct device *dev, | 80 | static inline struct irq_domain *of_msi_get_domain(struct device *dev, |
| 74 | struct device_node *np, | 81 | struct device_node *np, |
| 75 | enum irq_domain_bus_token token) | 82 | enum irq_domain_bus_token token) |
| @@ -84,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev | |||
| 84 | static inline void of_msi_configure(struct device *dev, struct device_node *np) | 91 | static inline void of_msi_configure(struct device *dev, struct device_node *np) |
| 85 | { | 92 | { |
| 86 | } | 93 | } |
| 94 | static inline u32 of_msi_map_rid(struct device *dev, | ||
| 95 | struct device_node *msi_np, u32 rid_in) | ||
| 96 | { | ||
| 97 | return rid_in; | ||
| 98 | } | ||
| 87 | #endif | 99 | #endif |
| 88 | 100 | ||
| 89 | #if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) | 101 | #if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) |
| @@ -93,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np) | |||
| 93 | * so declare it here regardless of the CONFIG_OF_IRQ setting. | 105 | * so declare it here regardless of the CONFIG_OF_IRQ setting. |
| 94 | */ | 106 | */ |
| 95 | extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); | 107 | extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); |
| 96 | u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); | ||
| 97 | 108 | ||
| 98 | #else /* !CONFIG_OF && !CONFIG_SPARC */ | 109 | #else /* !CONFIG_OF && !CONFIG_SPARC */ |
| 99 | static inline unsigned int irq_of_parse_and_map(struct device_node *dev, | 110 | static inline unsigned int irq_of_parse_and_map(struct device_node *dev, |
| @@ -101,12 +112,6 @@ static inline unsigned int irq_of_parse_and_map(struct device_node *dev, | |||
| 101 | { | 112 | { |
| 102 | return 0; | 113 | return 0; |
| 103 | } | 114 | } |
| 104 | |||
| 105 | static inline u32 of_msi_map_rid(struct device *dev, | ||
| 106 | struct device_node *msi_np, u32 rid_in) | ||
| 107 | { | ||
| 108 | return rid_in; | ||
| 109 | } | ||
| 110 | #endif /* !CONFIG_OF */ | 115 | #endif /* !CONFIG_OF */ |
| 111 | 116 | ||
| 112 | #endif /* __OF_IRQ_H */ | 117 | #endif /* __OF_IRQ_H */ |
diff --git a/include/linux/pci.h b/include/linux/pci.h index e828e7b4afec..6ae25aae88fd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -412,9 +412,18 @@ struct pci_host_bridge { | |||
| 412 | void (*release_fn)(struct pci_host_bridge *); | 412 | void (*release_fn)(struct pci_host_bridge *); |
| 413 | void *release_data; | 413 | void *release_data; |
| 414 | unsigned int ignore_reset_delay:1; /* for entire hierarchy */ | 414 | unsigned int ignore_reset_delay:1; /* for entire hierarchy */ |
| 415 | /* Resource alignment requirements */ | ||
| 416 | resource_size_t (*align_resource)(struct pci_dev *dev, | ||
| 417 | const struct resource *res, | ||
| 418 | resource_size_t start, | ||
| 419 | resource_size_t size, | ||
| 420 | resource_size_t align); | ||
| 415 | }; | 421 | }; |
| 416 | 422 | ||
| 417 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) | 423 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) |
| 424 | |||
| 425 | struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); | ||
| 426 | |||
| 418 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, | 427 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, |
| 419 | void (*release_fn)(struct pci_host_bridge *), | 428 | void (*release_fn)(struct pci_host_bridge *), |
| 420 | void *release_data); | 429 | void *release_data); |
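A host bridge driver could hook the new align_resource member roughly as below; the 4K I/O-window alignment policy and the function name are purely assumed for illustration.

#include <linux/pci.h>
#include <linux/sizes.h>

/* Sketch: force an assumed 4K alignment for I/O window allocations. */
static resource_size_t sketch_align_resource(struct pci_dev *dev,
					     const struct resource *res,
					     resource_size_t start,
					     resource_size_t size,
					     resource_size_t align)
{
	if (res->flags & IORESOURCE_IO)
		return ALIGN(start, SZ_4K);
	return start;
}

/* e.g. pci_find_host_bridge(bus)->align_resource = sketch_align_resource; */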
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index d841d33bcdc9..f9828a48f16a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -697,9 +697,11 @@ struct perf_cgroup { | |||
| 697 | * if there is no cgroup event for the current CPU context. | 697 | * if there is no cgroup event for the current CPU context. |
| 698 | */ | 698 | */ |
| 699 | static inline struct perf_cgroup * | 699 | static inline struct perf_cgroup * |
| 700 | perf_cgroup_from_task(struct task_struct *task) | 700 | perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) |
| 701 | { | 701 | { |
| 702 | return container_of(task_css(task, perf_event_cgrp_id), | 702 | return container_of(task_css_check(task, perf_event_cgrp_id, |
| 703 | ctx ? lockdep_is_held(&ctx->lock) | ||
| 704 | : true), | ||
| 703 | struct perf_cgroup, css); | 705 | struct perf_cgroup, css); |
| 704 | } | 706 | } |
| 705 | #endif /* CONFIG_CGROUP_PERF */ | 707 | #endif /* CONFIG_CGROUP_PERF */ |
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index e2878baeb90e..4299f4ba03bd 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
| @@ -72,7 +72,7 @@ struct edma_soc_info { | |||
| 72 | struct edma_rsv_info *rsv; | 72 | struct edma_rsv_info *rsv; |
| 73 | 73 | ||
| 74 | /* List of channels allocated for memcpy, terminated with -1 */ | 74 | /* List of channels allocated for memcpy, terminated with -1 */ |
| 75 | s16 *memcpy_channels; | 75 | s32 *memcpy_channels; |
| 76 | 76 | ||
| 77 | s8 (*queue_priority_mapping)[2]; | 77 | s8 (*queue_priority_mapping)[2]; |
| 78 | const s16 (*xbar_chans)[2]; | 78 | const s16 (*xbar_chans)[2]; |
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index d3889b98a1a1..fb5625bcca9a 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h | |||
| @@ -40,6 +40,8 @@ struct s3c64xx_spi_info { | |||
| 40 | int num_cs; | 40 | int num_cs; |
| 41 | int (*cfg_gpio)(void); | 41 | int (*cfg_gpio)(void); |
| 42 | dma_filter_fn filter; | 42 | dma_filter_fn filter; |
| 43 | void *dma_tx; | ||
| 44 | void *dma_rx; | ||
| 43 | }; | 45 | }; |
| 44 | 46 | ||
| 45 | /** | 47 | /** |
diff --git a/include/linux/proportions.h b/include/linux/proportions.h index 5440f64d2942..21221338ad18 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * FLoating proportions | 2 | * FLoating proportions |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 4 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra |
| 5 | * | 5 | * |
| 6 | * This file contains the public data structure and API definitions. | 6 | * This file contains the public data structure and API definitions. |
| 7 | */ | 7 | */ |
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 6a4347639c03..1d1ba2c5ee7a 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #ifndef __COMMON_HSI__ | 9 | #ifndef __COMMON_HSI__ |
| 10 | #define __COMMON_HSI__ | 10 | #define __COMMON_HSI__ |
| 11 | 11 | ||
| 12 | #define CORE_SPQE_PAGE_SIZE_BYTES 4096 | ||
| 13 | |||
| 12 | #define FW_MAJOR_VERSION 8 | 14 | #define FW_MAJOR_VERSION 8 |
| 13 | #define FW_MINOR_VERSION 4 | 15 | #define FW_MINOR_VERSION 4 |
| 14 | #define FW_REVISION_VERSION 2 | 16 | #define FW_REVISION_VERSION 2 |
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index b920c3605c46..41b9049b57e2 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h | |||
| @@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) | |||
| 111 | used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - | 111 | used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - |
| 112 | (u32)p_chain->cons_idx; | 112 | (u32)p_chain->cons_idx; |
| 113 | if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) | 113 | if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) |
| 114 | used -= (used / p_chain->elem_per_page); | 114 | used -= p_chain->prod_idx / p_chain->elem_per_page - |
| 115 | p_chain->cons_idx / p_chain->elem_per_page; | ||
| 115 | 116 | ||
| 116 | return p_chain->capacity - used; | 117 | return p_chain->capacity - used; |
| 117 | } | 118 | } |
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 843ceca9a21e..e50b31d18462 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | 19 | ||
| 20 | #include <linux/atomic.h> | 20 | #include <linux/atomic.h> |
| 21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
| 22 | #include <linux/err.h> | ||
| 22 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
| 23 | #include <linux/jhash.h> | 24 | #include <linux/jhash.h> |
| 24 | #include <linux/list_nulls.h> | 25 | #include <linux/list_nulls.h> |
| @@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, | |||
| 339 | int rhashtable_init(struct rhashtable *ht, | 340 | int rhashtable_init(struct rhashtable *ht, |
| 340 | const struct rhashtable_params *params); | 341 | const struct rhashtable_params *params); |
| 341 | 342 | ||
| 342 | int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | 343 | struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, |
| 343 | struct rhash_head *obj, | 344 | const void *key, |
| 344 | struct bucket_table *old_tbl); | 345 | struct rhash_head *obj, |
| 345 | int rhashtable_insert_rehash(struct rhashtable *ht); | 346 | struct bucket_table *old_tbl); |
| 347 | int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); | ||
| 346 | 348 | ||
| 347 | int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); | 349 | int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); |
| 348 | void rhashtable_walk_exit(struct rhashtable_iter *iter); | 350 | void rhashtable_walk_exit(struct rhashtable_iter *iter); |
| @@ -598,9 +600,11 @@ restart: | |||
| 598 | 600 | ||
| 599 | new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); | 601 | new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); |
| 600 | if (unlikely(new_tbl)) { | 602 | if (unlikely(new_tbl)) { |
| 601 | err = rhashtable_insert_slow(ht, key, obj, new_tbl); | 603 | tbl = rhashtable_insert_slow(ht, key, obj, new_tbl); |
| 602 | if (err == -EAGAIN) | 604 | if (!IS_ERR_OR_NULL(tbl)) |
| 603 | goto slow_path; | 605 | goto slow_path; |
| 606 | |||
| 607 | err = PTR_ERR(tbl); | ||
| 604 | goto out; | 608 | goto out; |
| 605 | } | 609 | } |
| 606 | 610 | ||
| @@ -611,7 +615,7 @@ restart: | |||
| 611 | if (unlikely(rht_grow_above_100(ht, tbl))) { | 615 | if (unlikely(rht_grow_above_100(ht, tbl))) { |
| 612 | slow_path: | 616 | slow_path: |
| 613 | spin_unlock_bh(lock); | 617 | spin_unlock_bh(lock); |
| 614 | err = rhashtable_insert_rehash(ht); | 618 | err = rhashtable_insert_rehash(ht, tbl); |
| 615 | rcu_read_unlock(); | 619 | rcu_read_unlock(); |
| 616 | if (err) | 620 | if (err) |
| 617 | return err; | 621 | return err; |
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index 80af3cd35ae4..72ce932c69b2 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h | |||
| @@ -71,7 +71,7 @@ struct scpi_ops { | |||
| 71 | int (*sensor_get_value)(u16, u32 *); | 71 | int (*sensor_get_value)(u16, u32 *); |
| 72 | }; | 72 | }; |
| 73 | 73 | ||
| 74 | #if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL) | 74 | #if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) |
| 75 | struct scpi_ops *get_scpi_ops(void); | 75 | struct scpi_ops *get_scpi_ops(void); |
| 76 | #else | 76 | #else |
| 77 | static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } | 77 | static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } |
diff --git a/include/linux/signal.h b/include/linux/signal.h index ab1e0392b5ac..92557bbce7e7 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
| @@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); | |||
| 239 | extern void set_current_blocked(sigset_t *); | 239 | extern void set_current_blocked(sigset_t *); |
| 240 | extern void __set_current_blocked(const sigset_t *); | 240 | extern void __set_current_blocked(const sigset_t *); |
| 241 | extern int show_unhandled_signals; | 241 | extern int show_unhandled_signals; |
| 242 | extern int sigsuspend(sigset_t *); | ||
| 243 | 242 | ||
| 244 | struct sigaction { | 243 | struct sigaction { |
| 245 | #ifndef __ARCH_HAS_IRIX_SIGACTION | 244 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 7c82e3b307a3..2037a861e367 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -158,6 +158,24 @@ size_t ksize(const void *); | |||
| 158 | #endif | 158 | #endif |
| 159 | 159 | ||
| 160 | /* | 160 | /* |
| 161 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
| 162 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
| 163 | * aligned buffers. | ||
| 164 | */ | ||
| 165 | #ifndef ARCH_SLAB_MINALIGN | ||
| 166 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 167 | #endif | ||
| 168 | |||
| 169 | /* | ||
| 170 | * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned | ||
| 171 | * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN | ||
| 172 | * aligned pointers. | ||
| 173 | */ | ||
| 174 | #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) | ||
| 175 | #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) | ||
| 176 | #define __assume_page_alignment __assume_aligned(PAGE_SIZE) | ||
| 177 | |||
| 178 | /* | ||
| 161 | * Kmalloc array related definitions | 179 | * Kmalloc array related definitions |
| 162 | */ | 180 | */ |
| 163 | 181 | ||
| @@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size) | |||
| 286 | } | 304 | } |
| 287 | #endif /* !CONFIG_SLOB */ | 305 | #endif /* !CONFIG_SLOB */ |
| 288 | 306 | ||
| 289 | void *__kmalloc(size_t size, gfp_t flags); | 307 | void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; |
| 290 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); | 308 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; |
| 291 | void kmem_cache_free(struct kmem_cache *, void *); | 309 | void kmem_cache_free(struct kmem_cache *, void *); |
| 292 | 310 | ||
| 293 | /* | 311 | /* |
| @@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *); | |||
| 298 | * Note that interrupts must be enabled when calling these functions. | 316 | * Note that interrupts must be enabled when calling these functions. |
| 299 | */ | 317 | */ |
| 300 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 318 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
| 301 | bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 319 | int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
| 302 | 320 | ||
| 303 | #ifdef CONFIG_NUMA | 321 | #ifdef CONFIG_NUMA |
| 304 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 322 | void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; |
| 305 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 323 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; |
| 306 | #else | 324 | #else |
| 307 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | 325 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) |
| 308 | { | 326 | { |
| @@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f | |||
| 316 | #endif | 334 | #endif |
| 317 | 335 | ||
| 318 | #ifdef CONFIG_TRACING | 336 | #ifdef CONFIG_TRACING |
| 319 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); | 337 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; |
| 320 | 338 | ||
| 321 | #ifdef CONFIG_NUMA | 339 | #ifdef CONFIG_NUMA |
| 322 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | 340 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, |
| 323 | gfp_t gfpflags, | 341 | gfp_t gfpflags, |
| 324 | int node, size_t size); | 342 | int node, size_t size) __assume_slab_alignment; |
| 325 | #else | 343 | #else |
| 326 | static __always_inline void * | 344 | static __always_inline void * |
| 327 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | 345 | kmem_cache_alloc_node_trace(struct kmem_cache *s, |
| @@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
| 354 | } | 372 | } |
| 355 | #endif /* CONFIG_TRACING */ | 373 | #endif /* CONFIG_TRACING */ |
| 356 | 374 | ||
| 357 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); | 375 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
| 358 | 376 | ||
| 359 | #ifdef CONFIG_TRACING | 377 | #ifdef CONFIG_TRACING |
| 360 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); | 378 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
| 361 | #else | 379 | #else |
| 362 | static __always_inline void * | 380 | static __always_inline void * |
| 363 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | 381 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) |
| @@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 482 | return __kmalloc_node(size, flags, node); | 500 | return __kmalloc_node(size, flags, node); |
| 483 | } | 501 | } |
| 484 | 502 | ||
| 485 | /* | ||
| 486 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
| 487 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
| 488 | * aligned buffers. | ||
| 489 | */ | ||
| 490 | #ifndef ARCH_SLAB_MINALIGN | ||
| 491 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 492 | #endif | ||
| 493 | |||
| 494 | struct memcg_cache_array { | 503 | struct memcg_cache_array { |
| 495 | struct rcu_head rcu; | 504 | struct rcu_head rcu; |
| 496 | struct kmem_cache *entries[0]; | 505 | struct kmem_cache *entries[0]; |
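The bulk-allocation return type change above means callers now check a count rather than a bool; a sketch, assuming the int return is the number of objects placed in the array and 0 means failure:

#include <linux/slab.h>

static int sketch_bulk_cycle(struct kmem_cache *cache, size_t nr)
{
	void *objs[16];
	int n;

	if (nr > ARRAY_SIZE(objs))
		nr = ARRAY_SIZE(objs);

	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, nr, objs);
	if (!n)
		return -ENOMEM;

	/* ... use objs[0..n-1] ... */
	kmem_cache_free_bulk(cache, n, objs);
	return 0;
}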
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 075bede66521..53be3a4c60cb 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
| @@ -425,6 +425,12 @@ struct spi_master { | |||
| 425 | #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ | 425 | #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ |
| 426 | #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ | 426 | #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ |
| 427 | 427 | ||
| 428 | /* | ||
| 429 | * on some hardware transfer size may be constrained | ||
| 430 | * the limit may depend on device transfer settings | ||
| 431 | */ | ||
| 432 | size_t (*max_transfer_size)(struct spi_device *spi); | ||
| 433 | |||
| 428 | /* lock and mutex for SPI bus locking */ | 434 | /* lock and mutex for SPI bus locking */ |
| 429 | spinlock_t bus_lock_spinlock; | 435 | spinlock_t bus_lock_spinlock; |
| 430 | struct mutex bus_lock_mutex; | 436 | struct mutex bus_lock_mutex; |
| @@ -762,10 +768,15 @@ struct spi_message { | |||
| 762 | void *state; | 768 | void *state; |
| 763 | }; | 769 | }; |
| 764 | 770 | ||
| 771 | static inline void spi_message_init_no_memset(struct spi_message *m) | ||
| 772 | { | ||
| 773 | INIT_LIST_HEAD(&m->transfers); | ||
| 774 | } | ||
| 775 | |||
| 765 | static inline void spi_message_init(struct spi_message *m) | 776 | static inline void spi_message_init(struct spi_message *m) |
| 766 | { | 777 | { |
| 767 | memset(m, 0, sizeof *m); | 778 | memset(m, 0, sizeof *m); |
| 768 | INIT_LIST_HEAD(&m->transfers); | 779 | spi_message_init_no_memset(m); |
| 769 | } | 780 | } |
| 770 | 781 | ||
| 771 | static inline void | 782 | static inline void |
| @@ -832,6 +843,15 @@ extern int spi_async(struct spi_device *spi, struct spi_message *message); | |||
| 832 | extern int spi_async_locked(struct spi_device *spi, | 843 | extern int spi_async_locked(struct spi_device *spi, |
| 833 | struct spi_message *message); | 844 | struct spi_message *message); |
| 834 | 845 | ||
| 846 | static inline size_t | ||
| 847 | spi_max_transfer_size(struct spi_device *spi) | ||
| 848 | { | ||
| 849 | struct spi_master *master = spi->master; | ||
| 850 | if (!master->max_transfer_size) | ||
| 851 | return SIZE_MAX; | ||
| 852 | return master->max_transfer_size(spi); | ||
| 853 | } | ||
| 854 | |||
| 835 | /*---------------------------------------------------------------------------*/ | 855 | /*---------------------------------------------------------------------------*/ |
| 836 | 856 | ||
| 837 | /* All these synchronous SPI transfer routines are utilities layered | 857 | /* All these synchronous SPI transfer routines are utilities layered |
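spi_max_transfer_size() lets protocol drivers chunk payloads against a controller limit; a sketch, where the chunking policy and function name are illustrative:

#include <linux/kernel.h>
#include <linux/spi/spi.h>

static int sketch_write_chunked(struct spi_device *spi,
				const u8 *buf, size_t len)
{
	size_t max = spi_max_transfer_size(spi);	/* SIZE_MAX if unlimited */

	while (len) {
		size_t chunk = min(len, max);
		int ret = spi_write(spi, buf, chunk);

		if (ret)
			return ret;
		buf += chunk;
		len -= chunk;
	}
	return 0;
}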
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 0adedca24c5b..0e1b1540597a 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h | |||
| @@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask, | |||
| 99 | * grabbing every spinlock (and more). So the "read" side to such a | 99 | * grabbing every spinlock (and more). So the "read" side to such a |
| 100 | * lock is anything which disables preemption. | 100 | * lock is anything which disables preemption. |
| 101 | */ | 101 | */ |
| 102 | #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) | 102 | #if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) |
| 103 | 103 | ||
| 104 | /** | 104 | /** |
| 105 | * stop_machine: freeze the machine on all CPUs and run this function | 105 | * stop_machine: freeze the machine on all CPUs and run this function |
| @@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); | |||
| 118 | 118 | ||
| 119 | int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, | 119 | int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, |
| 120 | const struct cpumask *cpus); | 120 | const struct cpumask *cpus); |
| 121 | #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 121 | #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ |
| 122 | 122 | ||
| 123 | static inline int stop_machine(cpu_stop_fn_t fn, void *data, | 123 | static inline int stop_machine(cpu_stop_fn_t fn, void *data, |
| 124 | const struct cpumask *cpus) | 124 | const struct cpumask *cpus) |
| @@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, | |||
| 137 | return stop_machine(fn, data, cpus); | 137 | return stop_machine(fn, data, cpus); |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 140 | #endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ |
| 141 | #endif /* _LINUX_STOP_MACHINE */ | 141 | #endif /* _LINUX_STOP_MACHINE */ |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a156b82dd14c..c2b66a277e98 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename, | |||
| 524 | asmlinkage long sys_lchown(const char __user *filename, | 524 | asmlinkage long sys_lchown(const char __user *filename, |
| 525 | uid_t user, gid_t group); | 525 | uid_t user, gid_t group); |
| 526 | asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); | 526 | asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); |
| 527 | #ifdef CONFIG_UID16 | 527 | #ifdef CONFIG_HAVE_UID16 |
| 528 | asmlinkage long sys_chown16(const char __user *filename, | 528 | asmlinkage long sys_chown16(const char __user *filename, |
| 529 | old_uid_t user, old_gid_t group); | 529 | old_uid_t user, old_gid_t group); |
| 530 | asmlinkage long sys_lchown16(const char __user *filename, | 530 | asmlinkage long sys_lchown16(const char __user *filename, |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 4014a59828fc..613c29bd6baf 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister( | |||
| 438 | static inline int thermal_zone_bind_cooling_device( | 438 | static inline int thermal_zone_bind_cooling_device( |
| 439 | struct thermal_zone_device *tz, int trip, | 439 | struct thermal_zone_device *tz, int trip, |
| 440 | struct thermal_cooling_device *cdev, | 440 | struct thermal_cooling_device *cdev, |
| 441 | unsigned long upper, unsigned long lower) | 441 | unsigned long upper, unsigned long lower, |
| 442 | unsigned int weight) | ||
| 442 | { return -ENODEV; } | 443 | { return -ENODEV; } |
| 443 | static inline int thermal_zone_unbind_cooling_device( | 444 | static inline int thermal_zone_unbind_cooling_device( |
| 444 | struct thermal_zone_device *tz, int trip, | 445 | struct thermal_zone_device *tz, int trip, |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 5b04b0a5375b..5e31f1b99037 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); | |||
| 607 | 607 | ||
| 608 | /* tty_audit.c */ | 608 | /* tty_audit.c */ |
| 609 | #ifdef CONFIG_AUDIT | 609 | #ifdef CONFIG_AUDIT |
| 610 | extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, | 610 | extern void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 611 | size_t size, unsigned icanon); | 611 | size_t size, unsigned icanon); |
| 612 | extern void tty_audit_exit(void); | 612 | extern void tty_audit_exit(void); |
| 613 | extern void tty_audit_fork(struct signal_struct *sig); | 613 | extern void tty_audit_fork(struct signal_struct *sig); |
| @@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); | |||
| 615 | extern void tty_audit_push(struct tty_struct *tty); | 615 | extern void tty_audit_push(struct tty_struct *tty); |
| 616 | extern int tty_audit_push_current(void); | 616 | extern int tty_audit_push_current(void); |
| 617 | #else | 617 | #else |
| 618 | static inline void tty_audit_add_data(struct tty_struct *tty, | 618 | static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 619 | unsigned char *data, size_t size, unsigned icanon) | 619 | size_t size, unsigned icanon) |
| 620 | { | 620 | { |
| 621 | } | 621 | } |
| 622 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) | 622 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) |
diff --git a/include/linux/types.h b/include/linux/types.h index 70d8500bddf1..70dd3dfde631 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t; | |||
| 35 | 35 | ||
| 36 | typedef unsigned long uintptr_t; | 36 | typedef unsigned long uintptr_t; |
| 37 | 37 | ||
| 38 | #ifdef CONFIG_UID16 | 38 | #ifdef CONFIG_HAVE_UID16 |
| 39 | /* This is defined by include/asm-{arch}/posix_types.h */ | 39 | /* This is defined by include/asm-{arch}/posix_types.h */ |
| 40 | typedef __kernel_old_uid_t old_uid_t; | 40 | typedef __kernel_old_uid_t old_uid_t; |
| 41 | typedef __kernel_old_gid_t old_gid_t; | 41 | typedef __kernel_old_gid_t old_gid_t; |
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 0bdc72f36905..4a29c75b146e 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | * Authors: | 21 | * Authors: |
| 22 | * Srikar Dronamraju | 22 | * Srikar Dronamraju |
| 23 | * Jim Keniston | 23 | * Jim Keniston |
| 24 | * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 24 | * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra |
| 25 | */ | 25 | */ |
| 26 | 26 | ||
| 27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 1f6526c76ee8..3a375d07d0dc 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
| @@ -138,6 +138,7 @@ struct cdc_ncm_ctx { | |||
| 138 | }; | 138 | }; |
| 139 | 139 | ||
| 140 | u8 cdc_ncm_select_altsetting(struct usb_interface *intf); | 140 | u8 cdc_ncm_select_altsetting(struct usb_interface *intf); |
| 141 | int cdc_ncm_change_mtu(struct net_device *net, int new_mtu); | ||
| 141 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); | 142 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); |
| 142 | void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); | 143 | void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); |
| 143 | struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); | 144 | struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); |
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 9948c874e3f1..1d0043dc34e4 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h | |||
| @@ -47,4 +47,7 @@ | |||
| 47 | /* device generates spurious wakeup, ignore remote wakeup capability */ | 47 | /* device generates spurious wakeup, ignore remote wakeup capability */ |
| 48 | #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) | 48 | #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) |
| 49 | 49 | ||
| 50 | /* device can't handle Link Power Management */ | ||
| 51 | #define USB_QUIRK_NO_LPM BIT(10) | ||
| 52 | |||
| 50 | #endif /* __LINUX_USB_QUIRKS_H */ | 53 | #endif /* __LINUX_USB_QUIRKS_H */ |
diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 610a86a892b8..ddb440975382 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h | |||
| @@ -44,9 +44,6 @@ struct vfio_device_ops { | |||
| 44 | void (*request)(void *device_data, unsigned int count); | 44 | void (*request)(void *device_data, unsigned int count); |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | extern struct iommu_group *vfio_iommu_group_get(struct device *dev); | ||
| 48 | extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); | ||
| 49 | |||
| 50 | extern int vfio_add_group_dev(struct device *dev, | 47 | extern int vfio_add_group_dev(struct device *dev, |
| 51 | const struct vfio_device_ops *ops, | 48 | const struct vfio_device_ops *ops, |
| 52 | void *device_data); | 49 | void *device_data); |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 5dbc8b0ee567..3e5d9075960f 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
| @@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); | |||
| 176 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) | 176 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) |
| 177 | 177 | ||
| 178 | #ifdef CONFIG_SMP | 178 | #ifdef CONFIG_SMP |
| 179 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); | 179 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); |
| 180 | void __inc_zone_page_state(struct page *, enum zone_stat_item); | 180 | void __inc_zone_page_state(struct page *, enum zone_stat_item); |
| 181 | void __dec_zone_page_state(struct page *, enum zone_stat_item); | 181 | void __dec_zone_page_state(struct page *, enum zone_stat_item); |
| 182 | 182 | ||
| 183 | void mod_zone_page_state(struct zone *, enum zone_stat_item, int); | 183 | void mod_zone_page_state(struct zone *, enum zone_stat_item, long); |
| 184 | void inc_zone_page_state(struct page *, enum zone_stat_item); | 184 | void inc_zone_page_state(struct page *, enum zone_stat_item); |
| 185 | void dec_zone_page_state(struct page *, enum zone_stat_item); | 185 | void dec_zone_page_state(struct page *, enum zone_stat_item); |
| 186 | 186 | ||
| @@ -205,7 +205,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat, | |||
| 205 | * The functions directly modify the zone and global counters. | 205 | * The functions directly modify the zone and global counters. |
| 206 | */ | 206 | */ |
| 207 | static inline void __mod_zone_page_state(struct zone *zone, | 207 | static inline void __mod_zone_page_state(struct zone *zone, |
| 208 | enum zone_stat_item item, int delta) | 208 | enum zone_stat_item item, long delta) |
| 209 | { | 209 | { |
| 210 | zone_page_state_add(delta, zone, item); | 210 | zone_page_state_add(delta, zone, item); |
| 211 | } | 211 | } |
diff --git a/include/linux/wait.h b/include/linux/wait.h index 1e1bf9f963a9..513b36f04dfd 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -145,7 +145,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) | |||
| 145 | list_del(&old->task_list); | 145 | list_del(&old->task_list); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | typedef int wait_bit_action_f(struct wait_bit_key *); | 148 | typedef int wait_bit_action_f(struct wait_bit_key *, int mode); |
| 149 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 149 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
| 150 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); | 150 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
| 151 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 151 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
| @@ -960,10 +960,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | |||
| 960 | } while (0) | 960 | } while (0) |
| 961 | 961 | ||
| 962 | 962 | ||
| 963 | extern int bit_wait(struct wait_bit_key *); | 963 | extern int bit_wait(struct wait_bit_key *, int); |
| 964 | extern int bit_wait_io(struct wait_bit_key *); | 964 | extern int bit_wait_io(struct wait_bit_key *, int); |
| 965 | extern int bit_wait_timeout(struct wait_bit_key *); | 965 | extern int bit_wait_timeout(struct wait_bit_key *, int); |
| 966 | extern int bit_wait_io_timeout(struct wait_bit_key *); | 966 | extern int bit_wait_io_timeout(struct wait_bit_key *, int); |
| 967 | 967 | ||
| 968 | /** | 968 | /** |
| 969 | * wait_on_bit - wait for a bit to be cleared | 969 | * wait_on_bit - wait for a bit to be cleared |
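With wait_bit_action_f gaining the sleep mode argument, a custom action now receives the caller's TASK_* state instead of re-deriving it; a sketch of such an action, where the sleep policy is illustrative and MY_WAIT_BIT is hypothetical:

#include <linux/wait.h>
#include <linux/sched.h>

static int sketch_bit_wait(struct wait_bit_key *key, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}

/* e.g. wait_on_bit_action(&flags_word, MY_WAIT_BIT, sketch_bit_wait,
 *			   TASK_INTERRUPTIBLE);
 */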
diff --git a/include/net/af_unix.h b/include/net/af_unix.h index b36d837c701e..2a91a0561a47 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h | |||
| @@ -62,6 +62,7 @@ struct unix_sock { | |||
| 62 | #define UNIX_GC_CANDIDATE 0 | 62 | #define UNIX_GC_CANDIDATE 0 |
| 63 | #define UNIX_GC_MAYBE_CYCLE 1 | 63 | #define UNIX_GC_MAYBE_CYCLE 1 |
| 64 | struct socket_wq peer_wq; | 64 | struct socket_wq peer_wq; |
| 65 | wait_queue_t peer_wake; | ||
| 65 | }; | 66 | }; |
| 66 | 67 | ||
| 67 | static inline struct unix_sock *unix_sk(const struct sock *sk) | 68 | static inline struct unix_sock *unix_sk(const struct sock *sk) |
diff --git a/include/net/dst.h b/include/net/dst.h index 1279f9b09791..c7329dcd90cc 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
| @@ -322,6 +322,39 @@ static inline void skb_dst_force(struct sk_buff *skb) | |||
| 322 | } | 322 | } |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | /** | ||
| 326 | * dst_hold_safe - Take a reference on a dst if possible | ||
| 327 | * @dst: pointer to dst entry | ||
| 328 | * | ||
| 329 | * This helper returns false if it could not safely | ||
| 330 | * take a reference on a dst. | ||
| 331 | */ | ||
| 332 | static inline bool dst_hold_safe(struct dst_entry *dst) | ||
| 333 | { | ||
| 334 | if (dst->flags & DST_NOCACHE) | ||
| 335 | return atomic_inc_not_zero(&dst->__refcnt); | ||
| 336 | dst_hold(dst); | ||
| 337 | return true; | ||
| 338 | } | ||
| 339 | |||
| 340 | /** | ||
| 341 | * skb_dst_force_safe - makes sure skb dst is refcounted | ||
| 342 | * @skb: buffer | ||
| 343 | * | ||
| 344 | * If dst is not yet refcounted and not destroyed, grab a ref on it. | ||
| 345 | */ | ||
| 346 | static inline void skb_dst_force_safe(struct sk_buff *skb) | ||
| 347 | { | ||
| 348 | if (skb_dst_is_noref(skb)) { | ||
| 349 | struct dst_entry *dst = skb_dst(skb); | ||
| 350 | |||
| 351 | if (!dst_hold_safe(dst)) | ||
| 352 | dst = NULL; | ||
| 353 | |||
| 354 | skb->_skb_refdst = (unsigned long)dst; | ||
| 355 | } | ||
| 356 | } | ||
| 357 | |||
| 325 | 358 | ||
| 326 | /** | 359 | /** |
| 327 | * __skb_tunnel_rx - prepare skb for rx reinsert | 360 | * __skb_tunnel_rx - prepare skb for rx reinsert |
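The new helpers let code that queues an skb beyond the current RCU section pin (or drop) a noref dst safely; a minimal sketch with an assumed caller:

#include <linux/skbuff.h>
#include <net/dst.h>

static void sketch_defer_skb(struct sk_buff_head *q, struct sk_buff *skb)
{
	/* A noref dst is either converted to a real reference or cleared. */
	skb_dst_force_safe(skb);
	skb_queue_tail(q, skb);
}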
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 2134e6d815bc..625bdf95d673 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
| @@ -210,18 +210,37 @@ struct inet_sock { | |||
| 210 | #define IP_CMSG_ORIGDSTADDR BIT(6) | 210 | #define IP_CMSG_ORIGDSTADDR BIT(6) |
| 211 | #define IP_CMSG_CHECKSUM BIT(7) | 211 | #define IP_CMSG_CHECKSUM BIT(7) |
| 212 | 212 | ||
| 213 | /* SYNACK messages might be attached to request sockets. | 213 | /** |
| 214 | * sk_to_full_sk - Access to a full socket | ||
| 215 | * @sk: pointer to a socket | ||
| 216 | * | ||
| 217 | * SYNACK messages might be attached to request sockets. | ||
| 214 | * Some places want to reach the listener in this case. | 218 | * Some places want to reach the listener in this case. |
| 215 | */ | 219 | */ |
| 216 | static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) | 220 | static inline struct sock *sk_to_full_sk(struct sock *sk) |
| 217 | { | 221 | { |
| 218 | struct sock *sk = skb->sk; | 222 | #ifdef CONFIG_INET |
| 219 | |||
| 220 | if (sk && sk->sk_state == TCP_NEW_SYN_RECV) | 223 | if (sk && sk->sk_state == TCP_NEW_SYN_RECV) |
| 221 | sk = inet_reqsk(sk)->rsk_listener; | 224 | sk = inet_reqsk(sk)->rsk_listener; |
| 225 | #endif | ||
| 226 | return sk; | ||
| 227 | } | ||
| 228 | |||
| 229 | /* sk_to_full_sk() variant with a const argument */ | ||
| 230 | static inline const struct sock *sk_const_to_full_sk(const struct sock *sk) | ||
| 231 | { | ||
| 232 | #ifdef CONFIG_INET | ||
| 233 | if (sk && sk->sk_state == TCP_NEW_SYN_RECV) | ||
| 234 | sk = ((const struct request_sock *)sk)->rsk_listener; | ||
| 235 | #endif | ||
| 222 | return sk; | 236 | return sk; |
| 223 | } | 237 | } |
| 224 | 238 | ||
| 239 | static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) | ||
| 240 | { | ||
| 241 | return sk_to_full_sk(skb->sk); | ||
| 242 | } | ||
| 243 | |||
| 225 | static inline struct inet_sock *inet_sk(const struct sock *sk) | 244 | static inline struct inet_sock *inet_sk(const struct sock *sk) |
| 226 | { | 245 | { |
| 227 | return (struct inet_sock *)sk; | 246 | return (struct inet_sock *)sk; |
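sk_to_full_sk()/skb_to_full_sk() give hooks that may see SYNACK packets a uniform way to reach the listener before touching full-socket fields; a sketch, where reading sk_mark is just an example:

#include <net/inet_sock.h>

static u32 sketch_skb_mark(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	return sk ? sk->sk_mark : 0;
}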
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 4a6009d4486b..235c7811a86a 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
| @@ -78,6 +78,7 @@ void inet_initpeers(void) __init; | |||
| 78 | static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) | 78 | static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) |
| 79 | { | 79 | { |
| 80 | iaddr->a4.addr = ip; | 80 | iaddr->a4.addr = ip; |
| 81 | iaddr->a4.vif = 0; | ||
| 81 | iaddr->family = AF_INET; | 82 | iaddr->family = AF_INET; |
| 82 | } | 83 | } |
| 83 | 84 | ||
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index aaf9700fc9e5..fb961a576abe 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h | |||
| @@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) | |||
| 167 | 167 | ||
| 168 | static inline u32 rt6_get_cookie(const struct rt6_info *rt) | 168 | static inline u32 rt6_get_cookie(const struct rt6_info *rt) |
| 169 | { | 169 | { |
| 170 | if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE)) | 170 | if (rt->rt6i_flags & RTF_PCPU || |
| 171 | (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from)) | ||
| 171 | rt = (struct rt6_info *)(rt->dst.from); | 172 | rt = (struct rt6_info *)(rt->dst.from); |
| 172 | 173 | ||
| 173 | return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; | 174 | return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 2bfb2ad2fab1..877f682989b8 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
| @@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); | |||
| 133 | /* | 133 | /* |
| 134 | * Store a destination cache entry in a socket | 134 | * Store a destination cache entry in a socket |
| 135 | */ | 135 | */ |
| 136 | static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, | 136 | static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, |
| 137 | const struct in6_addr *daddr, | 137 | const struct in6_addr *daddr, |
| 138 | const struct in6_addr *saddr) | 138 | const struct in6_addr *saddr) |
| 139 | { | 139 | { |
| 140 | struct ipv6_pinfo *np = inet6_sk(sk); | 140 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 141 | struct rt6_info *rt = (struct rt6_info *) dst; | ||
| 142 | 141 | ||
| 142 | np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst); | ||
| 143 | sk_setup_caps(sk, dst); | 143 | sk_setup_caps(sk, dst); |
| 144 | np->daddr_cache = daddr; | 144 | np->daddr_cache = daddr; |
| 145 | #ifdef CONFIG_IPV6_SUBTREES | 145 | #ifdef CONFIG_IPV6_SUBTREES |
| 146 | np->saddr_cache = saddr; | 146 | np->saddr_cache = saddr; |
| 147 | #endif | 147 | #endif |
| 148 | np->dst_cookie = rt6_get_cookie(rt); | ||
| 149 | } | ||
| 150 | |||
| 151 | static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, | ||
| 152 | struct in6_addr *daddr, struct in6_addr *saddr) | ||
| 153 | { | ||
| 154 | spin_lock(&sk->sk_dst_lock); | ||
| 155 | __ip6_dst_store(sk, dst, daddr, saddr); | ||
| 156 | spin_unlock(&sk->sk_dst_lock); | ||
| 157 | } | 148 | } |
| 158 | 149 | ||
| 159 | static inline bool ipv6_unicast_destination(const struct sk_buff *skb) | 150 | static inline bool ipv6_unicast_destination(const struct sk_buff *skb) |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index aaee6fa02cf1..ff788b665277 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
| @@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | |||
| 90 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); | 90 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); |
| 91 | 91 | ||
| 92 | if (net_xmit_eval(err) == 0) { | 92 | if (net_xmit_eval(err) == 0) { |
| 93 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); | 93 | struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); |
| 94 | u64_stats_update_begin(&tstats->syncp); | 94 | u64_stats_update_begin(&tstats->syncp); |
| 95 | tstats->tx_bytes += pkt_len; | 95 | tstats->tx_bytes += pkt_len; |
| 96 | tstats->tx_packets++; | 96 | tstats->tx_packets++; |
| 97 | u64_stats_update_end(&tstats->syncp); | 97 | u64_stats_update_end(&tstats->syncp); |
| 98 | put_cpu_ptr(tstats); | ||
| 98 | } else { | 99 | } else { |
| 99 | stats->tx_errors++; | 100 | stats->tx_errors++; |
| 100 | stats->tx_aborted_errors++; | 101 | stats->tx_aborted_errors++; |
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index f6dafec9102c..62a750a6a8f8 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h | |||
| @@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err, | |||
| 287 | struct pcpu_sw_netstats __percpu *stats) | 287 | struct pcpu_sw_netstats __percpu *stats) |
| 288 | { | 288 | { |
| 289 | if (err > 0) { | 289 | if (err > 0) { |
| 290 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats); | 290 | struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats); |
| 291 | 291 | ||
| 292 | u64_stats_update_begin(&tstats->syncp); | 292 | u64_stats_update_begin(&tstats->syncp); |
| 293 | tstats->tx_bytes += err; | 293 | tstats->tx_bytes += err; |
| 294 | tstats->tx_packets++; | 294 | tstats->tx_packets++; |
| 295 | u64_stats_update_end(&tstats->syncp); | 295 | u64_stats_update_end(&tstats->syncp); |
| 296 | put_cpu_ptr(tstats); | ||
| 296 | } else if (err < 0) { | 297 | } else if (err < 0) { |
| 297 | err_stats->tx_errors++; | 298 | err_stats->tx_errors++; |
| 298 | err_stats->tx_aborted_errors++; | 299 | err_stats->tx_aborted_errors++; |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e1a10b0ac0b0..9a5c9f013784 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
| @@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock; | |||
| 205 | */ | 205 | */ |
| 206 | 206 | ||
| 207 | struct ipv6_txoptions { | 207 | struct ipv6_txoptions { |
| 208 | atomic_t refcnt; | ||
| 208 | /* Length of this structure */ | 209 | /* Length of this structure */ |
| 209 | int tot_len; | 210 | int tot_len; |
| 210 | 211 | ||
| @@ -217,7 +218,7 @@ struct ipv6_txoptions { | |||
| 217 | struct ipv6_opt_hdr *dst0opt; | 218 | struct ipv6_opt_hdr *dst0opt; |
| 218 | struct ipv6_rt_hdr *srcrt; /* Routing Header */ | 219 | struct ipv6_rt_hdr *srcrt; /* Routing Header */ |
| 219 | struct ipv6_opt_hdr *dst1opt; | 220 | struct ipv6_opt_hdr *dst1opt; |
| 220 | 221 | struct rcu_head rcu; | |
| 221 | /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ | 222 | /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ |
| 222 | }; | 223 | }; |
| 223 | 224 | ||
| @@ -252,6 +253,24 @@ struct ipv6_fl_socklist { | |||
| 252 | struct rcu_head rcu; | 253 | struct rcu_head rcu; |
| 253 | }; | 254 | }; |
| 254 | 255 | ||
| 256 | static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) | ||
| 257 | { | ||
| 258 | struct ipv6_txoptions *opt; | ||
| 259 | |||
| 260 | rcu_read_lock(); | ||
| 261 | opt = rcu_dereference(np->opt); | ||
| 262 | if (opt && !atomic_inc_not_zero(&opt->refcnt)) | ||
| 263 | opt = NULL; | ||
| 264 | rcu_read_unlock(); | ||
| 265 | return opt; | ||
| 266 | } | ||
| 267 | |||
| 268 | static inline void txopt_put(struct ipv6_txoptions *opt) | ||
| 269 | { | ||
| 270 | if (opt && atomic_dec_and_test(&opt->refcnt)) | ||
| 271 | kfree_rcu(opt, rcu); | ||
| 272 | } | ||
| 273 | |||
| 255 | struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); | 274 | struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); |
| 256 | struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, | 275 | struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, |
| 257 | struct ip6_flowlabel *fl, | 276 | struct ip6_flowlabel *fl, |
| @@ -490,6 +509,7 @@ struct ip6_create_arg { | |||
| 490 | u32 user; | 509 | u32 user; |
| 491 | const struct in6_addr *src; | 510 | const struct in6_addr *src; |
| 492 | const struct in6_addr *dst; | 511 | const struct in6_addr *dst; |
| 512 | int iif; | ||
| 493 | u8 ecn; | 513 | u8 ecn; |
| 494 | }; | 514 | }; |
| 495 | 515 | ||
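txopt_get()/txopt_put() replace bare np->opt dereferences with an RCU-plus-refcount pattern; a minimal reader sketch (the length computation is illustrative):

#include <net/ipv6.h>

static int sketch_txopt_len(struct ipv6_pinfo *np)
{
	struct ipv6_txoptions *opt = txopt_get(np);
	int len = opt ? opt->opt_flen + opt->opt_nflen : 0;

	txopt_put(opt);	/* handles NULL */
	return len;
}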
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 82045fca388b..760bc4d5a2cf 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
| @@ -2003,8 +2003,10 @@ enum ieee80211_hw_flags { | |||
| 2003 | * it shouldn't be set. | 2003 | * it shouldn't be set. |
| 2004 | * | 2004 | * |
| 2005 | * @max_tx_aggregation_subframes: maximum number of subframes in an | 2005 | * @max_tx_aggregation_subframes: maximum number of subframes in an |
| 2006 | * aggregate an HT driver will transmit, used by the peer as a | 2006 | * aggregate an HT driver will transmit. Though ADDBA will advertise |
| 2007 | * hint to size its reorder buffer. | 2007 | * a constant value of 64 as some older APs can crash if the window |
| 2008 | * size is smaller (an example is LinkSys WRT120N with FW v1.0.07 | ||
| 2009 | * build 002 Jun 18 2012). | ||
| 2008 | * | 2010 | * |
| 2009 | * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX | 2011 | * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX |
| 2010 | * (if %IEEE80211_HW_QUEUE_CONTROL is set) | 2012 | * (if %IEEE80211_HW_QUEUE_CONTROL is set) |
diff --git a/include/net/ndisc.h b/include/net/ndisc.h index bf3937431030..2d8edaad29cb 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h | |||
| @@ -181,8 +181,7 @@ void ndisc_cleanup(void); | |||
| 181 | int ndisc_rcv(struct sk_buff *skb); | 181 | int ndisc_rcv(struct sk_buff *skb); |
| 182 | 182 | ||
| 183 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, | 183 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, |
| 184 | const struct in6_addr *daddr, const struct in6_addr *saddr, | 184 | const struct in6_addr *daddr, const struct in6_addr *saddr); |
| 185 | struct sk_buff *oskb); | ||
| 186 | 185 | ||
| 187 | void ndisc_send_rs(struct net_device *dev, | 186 | void ndisc_send_rs(struct net_device *dev, |
| 188 | const struct in6_addr *saddr, const struct in6_addr *daddr); | 187 | const struct in6_addr *saddr, const struct in6_addr *daddr); |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index c9149cc0a02d..4bd7508bedc9 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
| @@ -618,6 +618,8 @@ struct nft_expr_ops { | |||
| 618 | void (*eval)(const struct nft_expr *expr, | 618 | void (*eval)(const struct nft_expr *expr, |
| 619 | struct nft_regs *regs, | 619 | struct nft_regs *regs, |
| 620 | const struct nft_pktinfo *pkt); | 620 | const struct nft_pktinfo *pkt); |
| 621 | int (*clone)(struct nft_expr *dst, | ||
| 622 | const struct nft_expr *src); | ||
| 621 | unsigned int size; | 623 | unsigned int size; |
| 622 | 624 | ||
| 623 | int (*init)(const struct nft_ctx *ctx, | 625 | int (*init)(const struct nft_ctx *ctx, |
| @@ -660,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr); | |||
| 660 | int nft_expr_dump(struct sk_buff *skb, unsigned int attr, | 662 | int nft_expr_dump(struct sk_buff *skb, unsigned int attr, |
| 661 | const struct nft_expr *expr); | 663 | const struct nft_expr *expr); |
| 662 | 664 | ||
| 663 | static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | 665 | static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) |
| 664 | { | 666 | { |
| 667 | int err; | ||
| 668 | |||
| 665 | __module_get(src->ops->type->owner); | 669 | __module_get(src->ops->type->owner); |
| 666 | memcpy(dst, src, src->ops->size); | 670 | if (src->ops->clone) { |
| 671 | dst->ops = src->ops; | ||
| 672 | err = src->ops->clone(dst, src); | ||
| 673 | if (err < 0) | ||
| 674 | return err; | ||
| 675 | } else { | ||
| 676 | memcpy(dst, src, src->ops->size); | ||
| 677 | } | ||
| 678 | return 0; | ||
| 667 | } | 679 | } |
| 668 | 680 | ||
| 669 | /** | 681 | /** |
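nft_expr_ops gains an optional clone() callback and nft_expr_clone() now returns an error, so expressions whose private data cannot be duplicated with a plain memcpy() (per-cpu counters, refcounted objects) can perform a real deep copy, and callers must check the result. A sketch of such a callback under assumed names; demo_priv and demo_clone are illustrative, not kernel code:

#include <linux/percpu.h>
#include <net/netfilter/nf_tables.h>

struct demo_priv {
        u64 __percpu *bytes;    /* illustrative per-cpu state */
};

static int demo_clone(struct nft_expr *dst, const struct nft_expr *src)
{
        struct demo_priv *priv = nft_expr_priv(dst);

        /* A plain memcpy() would make dst share the per-cpu area with
         * src, so allocate a fresh one for the copy instead. */
        priv->bytes = alloc_percpu(u64);
        if (!priv->bytes)
                return -ENOMEM;
        return 0;
}

The callback would be wired up as .clone = demo_clone in the expression's nft_expr_ops; a complete implementation would also copy the accumulated values over from src.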
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 4c79ce8c1f92..b2a8e6338576 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -61,6 +61,9 @@ struct Qdisc { | |||
| 61 | */ | 61 | */ |
| 62 | #define TCQ_F_WARN_NONWC (1 << 16) | 62 | #define TCQ_F_WARN_NONWC (1 << 16) |
| 63 | #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ | 63 | #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ |
| 64 | #define TCQ_F_NOPARENT 0x40 /* root of its hierarchy : | ||
| 65 | * qdisc_tree_decrease_qlen() should stop. | ||
| 66 | */ | ||
| 64 | u32 limit; | 67 | u32 limit; |
| 65 | const struct Qdisc_ops *ops; | 68 | const struct Qdisc_ops *ops; |
| 66 | struct qdisc_size_table __rcu *stab; | 69 | struct qdisc_size_table __rcu *stab; |
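TCQ_F_NOPARENT marks a qdisc that sits at the top of its own hierarchy (for example the per-queue children that mq-style schedulers attach directly to device transmit queues), so qdisc_tree_decrease_qlen() knows not to walk further up. A sketch of how such a child might be created; demo_attach_child is an illustrative helper, not kernel code:

#include <net/sch_generic.h>
#include <net/pkt_sched.h>

static struct Qdisc *demo_attach_child(struct netdev_queue *txq, u32 parentid)
{
        struct Qdisc *child = qdisc_create_dflt(txq, &pfifo_fast_ops, parentid);

        if (child)
                child->flags |= TCQ_F_NOPARENT; /* stop qlen propagation here */
        return child;
}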
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 495c87e367b3..eea9bdeecba2 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -775,10 +775,10 @@ struct sctp_transport { | |||
| 775 | hb_sent:1, | 775 | hb_sent:1, |
| 776 | 776 | ||
| 777 | /* Is the Path MTU update pending on this tranport */ | 777 | /* Is the Path MTU update pending on this tranport */ |
| 778 | pmtu_pending:1; | 778 | pmtu_pending:1, |
| 779 | 779 | ||
| 780 | /* Has this transport moved the ctsn since we last sacked */ | 780 | /* Has this transport moved the ctsn since we last sacked */ |
| 781 | __u32 sack_generation; | 781 | sack_generation:1; |
| 782 | u32 dst_cookie; | 782 | u32 dst_cookie; |
| 783 | 783 | ||
| 784 | struct flowi fl; | 784 | struct flowi fl; |
| @@ -1482,19 +1482,20 @@ struct sctp_association { | |||
| 1482 | prsctp_capable:1, /* Can peer do PR-SCTP? */ | 1482 | prsctp_capable:1, /* Can peer do PR-SCTP? */ |
| 1483 | auth_capable:1; /* Is peer doing SCTP-AUTH? */ | 1483 | auth_capable:1; /* Is peer doing SCTP-AUTH? */ |
| 1484 | 1484 | ||
| 1485 | /* Ack State : This flag indicates if the next received | 1485 | /* sack_needed : This flag indicates if the next received |
| 1486 | * : packet is to be responded to with a | 1486 | * : packet is to be responded to with a |
| 1487 | * : SACK. This is initializedto 0. When a packet | 1487 | * : SACK. This is initialized to 0. When a packet |
| 1488 | * : is received it is incremented. If this value | 1488 | * : is received sack_cnt is incremented. If this value |
| 1489 | * : reaches 2 or more, a SACK is sent and the | 1489 | * : reaches 2 or more, a SACK is sent and the |
| 1490 | * : value is reset to 0. Note: This is used only | 1490 | * : value is reset to 0. Note: This is used only |
| 1491 | * : when no DATA chunks are received out of | 1491 | * : when no DATA chunks are received out of |
| 1492 | * : order. When DATA chunks are out of order, | 1492 | * : order. When DATA chunks are out of order, |
| 1493 | * : SACK's are not delayed (see Section 6). | 1493 | * : SACK's are not delayed (see Section 6). |
| 1494 | */ | 1494 | */ |
| 1495 | __u8 sack_needed; /* Do we need to sack the peer? */ | 1495 | __u8 sack_needed:1, /* Do we need to sack the peer? */ |
| 1496 | sack_generation:1, | ||
| 1497 | zero_window_announced:1; | ||
| 1496 | __u32 sack_cnt; | 1498 | __u32 sack_cnt; |
| 1497 | __u32 sack_generation; | ||
| 1498 | 1499 | ||
| 1499 | __u32 adaptation_ind; /* Adaptation Code point. */ | 1500 | __u32 adaptation_ind; /* Adaptation Code point. */ |
| 1500 | 1501 | ||
diff --git a/include/net/sock.h b/include/net/sock.h index bbf7c2cf15b4..14d3c0734007 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -254,7 +254,6 @@ struct cg_proto; | |||
| 254 | * @sk_wq: sock wait queue and async head | 254 | * @sk_wq: sock wait queue and async head |
| 255 | * @sk_rx_dst: receive input route used by early demux | 255 | * @sk_rx_dst: receive input route used by early demux |
| 256 | * @sk_dst_cache: destination cache | 256 | * @sk_dst_cache: destination cache |
| 257 | * @sk_dst_lock: destination cache lock | ||
| 258 | * @sk_policy: flow policy | 257 | * @sk_policy: flow policy |
| 259 | * @sk_receive_queue: incoming packets | 258 | * @sk_receive_queue: incoming packets |
| 260 | * @sk_wmem_alloc: transmit queue bytes committed | 259 | * @sk_wmem_alloc: transmit queue bytes committed |
| @@ -384,14 +383,16 @@ struct sock { | |||
| 384 | int sk_rcvbuf; | 383 | int sk_rcvbuf; |
| 385 | 384 | ||
| 386 | struct sk_filter __rcu *sk_filter; | 385 | struct sk_filter __rcu *sk_filter; |
| 387 | struct socket_wq __rcu *sk_wq; | 386 | union { |
| 388 | 387 | struct socket_wq __rcu *sk_wq; | |
| 388 | struct socket_wq *sk_wq_raw; | ||
| 389 | }; | ||
| 389 | #ifdef CONFIG_XFRM | 390 | #ifdef CONFIG_XFRM |
| 390 | struct xfrm_policy *sk_policy[2]; | 391 | struct xfrm_policy __rcu *sk_policy[2]; |
| 391 | #endif | 392 | #endif |
| 392 | struct dst_entry *sk_rx_dst; | 393 | struct dst_entry *sk_rx_dst; |
| 393 | struct dst_entry __rcu *sk_dst_cache; | 394 | struct dst_entry __rcu *sk_dst_cache; |
| 394 | spinlock_t sk_dst_lock; | 395 | /* Note: 32bit hole on 64bit arches */ |
| 395 | atomic_t sk_wmem_alloc; | 396 | atomic_t sk_wmem_alloc; |
| 396 | atomic_t sk_omem_alloc; | 397 | atomic_t sk_omem_alloc; |
| 397 | int sk_sndbuf; | 398 | int sk_sndbuf; |
| @@ -403,6 +404,7 @@ struct sock { | |||
| 403 | sk_userlocks : 4, | 404 | sk_userlocks : 4, |
| 404 | sk_protocol : 8, | 405 | sk_protocol : 8, |
| 405 | sk_type : 16; | 406 | sk_type : 16; |
| 407 | #define SK_PROTOCOL_MAX U8_MAX | ||
| 406 | kmemcheck_bitfield_end(flags); | 408 | kmemcheck_bitfield_end(flags); |
| 407 | int sk_wmem_queued; | 409 | int sk_wmem_queued; |
| 408 | gfp_t sk_allocation; | 410 | gfp_t sk_allocation; |
| @@ -739,6 +741,8 @@ enum sock_flags { | |||
| 739 | SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ | 741 | SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ |
| 740 | }; | 742 | }; |
| 741 | 743 | ||
| 744 | #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) | ||
| 745 | |||
| 742 | static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) | 746 | static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) |
| 743 | { | 747 | { |
| 744 | nsk->sk_flags = osk->sk_flags; | 748 | nsk->sk_flags = osk->sk_flags; |
| @@ -813,7 +817,7 @@ void sk_stream_write_space(struct sock *sk); | |||
| 813 | static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) | 817 | static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) |
| 814 | { | 818 | { |
| 815 | /* dont let skb dst not refcounted, we are going to leave rcu lock */ | 819 | /* dont let skb dst not refcounted, we are going to leave rcu lock */ |
| 816 | skb_dst_force(skb); | 820 | skb_dst_force_safe(skb); |
| 817 | 821 | ||
| 818 | if (!sk->sk_backlog.tail) | 822 | if (!sk->sk_backlog.tail) |
| 819 | sk->sk_backlog.head = skb; | 823 | sk->sk_backlog.head = skb; |
| @@ -2005,10 +2009,27 @@ static inline unsigned long sock_wspace(struct sock *sk) | |||
| 2005 | return amt; | 2009 | return amt; |
| 2006 | } | 2010 | } |
| 2007 | 2011 | ||
| 2008 | static inline void sk_wake_async(struct sock *sk, int how, int band) | 2012 | /* Note: |
| 2013 | * We use sk->sk_wq_raw, from contexts knowing this | ||
| 2014 | * pointer is not NULL and cannot disappear/change. | ||
| 2015 | */ | ||
| 2016 | static inline void sk_set_bit(int nr, struct sock *sk) | ||
| 2017 | { | ||
| 2018 | set_bit(nr, &sk->sk_wq_raw->flags); | ||
| 2019 | } | ||
| 2020 | |||
| 2021 | static inline void sk_clear_bit(int nr, struct sock *sk) | ||
| 2022 | { | ||
| 2023 | clear_bit(nr, &sk->sk_wq_raw->flags); | ||
| 2024 | } | ||
| 2025 | |||
| 2026 | static inline void sk_wake_async(const struct sock *sk, int how, int band) | ||
| 2009 | { | 2027 | { |
| 2010 | if (sock_flag(sk, SOCK_FASYNC)) | 2028 | if (sock_flag(sk, SOCK_FASYNC)) { |
| 2011 | sock_wake_async(sk->sk_socket, how, band); | 2029 | rcu_read_lock(); |
| 2030 | sock_wake_async(rcu_dereference(sk->sk_wq), how, band); | ||
| 2031 | rcu_read_unlock(); | ||
| 2032 | } | ||
| 2012 | } | 2033 | } |
| 2013 | 2034 | ||
| 2014 | /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might | 2035 | /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might |
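sk_wq now shares a union with sk_wq_raw, the new sk_set_bit()/sk_clear_bit() helpers flip flags on the wait-queue structure directly, and sk_wake_async() dereferences sk_wq under rcu_read_lock() instead of trusting sk->sk_socket. A sketch of typical callers, assuming the SOCKWQ_ASYNC_* flag names introduced alongside these helpers; demo_wait_for_data and demo_data_ready are illustrative:

#include <linux/net.h>
#include <linux/signal.h>
#include <net/sock.h>

static void demo_wait_for_data(struct sock *sk)
{
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        /* ... sleep until the receive queue is non-empty ... */
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}

static void demo_data_ready(struct sock *sk)
{
        /* Signals async subscribers; sk_wq is read under RCU inside. */
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}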
| @@ -2226,6 +2247,31 @@ static inline bool sk_listener(const struct sock *sk) | |||
| 2226 | return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); | 2247 | return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); |
| 2227 | } | 2248 | } |
| 2228 | 2249 | ||
| 2250 | /** | ||
| 2251 | * sk_state_load - read sk->sk_state for lockless contexts | ||
| 2252 | * @sk: socket pointer | ||
| 2253 | * | ||
| 2254 | * Paired with sk_state_store(). Used in places we do not hold socket lock : | ||
| 2255 | * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ... | ||
| 2256 | */ | ||
| 2257 | static inline int sk_state_load(const struct sock *sk) | ||
| 2258 | { | ||
| 2259 | return smp_load_acquire(&sk->sk_state); | ||
| 2260 | } | ||
| 2261 | |||
| 2262 | /** | ||
| 2263 | * sk_state_store - update sk->sk_state | ||
| 2264 | * @sk: socket pointer | ||
| 2265 | * @newstate: new state | ||
| 2266 | * | ||
| 2267 | * Paired with sk_state_load(). Should be used in contexts where | ||
| 2268 | * state change might impact lockless readers. | ||
| 2269 | */ | ||
| 2270 | static inline void sk_state_store(struct sock *sk, int newstate) | ||
| 2271 | { | ||
| 2272 | smp_store_release(&sk->sk_state, newstate); | ||
| 2273 | } | ||
| 2274 | |||
| 2229 | void sock_enable_timestamp(struct sock *sk, int flag); | 2275 | void sock_enable_timestamp(struct sock *sk, int flag); |
| 2230 | int sock_get_timestamp(struct sock *, struct timeval __user *); | 2276 | int sock_get_timestamp(struct sock *, struct timeval __user *); |
| 2231 | int sock_get_timestampns(struct sock *, struct timespec __user *); | 2277 | int sock_get_timestampns(struct sock *, struct timespec __user *); |
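sk_state_load() and sk_state_store() wrap sk->sk_state in smp_load_acquire()/smp_store_release() so lockless readers such as tcp_poll() or the diag code observe a fully published state. A minimal sketch of the intended pairing; the demo_* functions are illustrative, not kernel code:

#include <net/sock.h>
#include <net/tcp_states.h>

static void demo_set_established(struct sock *sk)
{
        /* Writer: publish the new state with release semantics. */
        sk_state_store(sk, TCP_ESTABLISHED);
}

static bool demo_is_listening(const struct sock *sk)
{
        /* Lockless reader: acquire load pairs with the store above. */
        return sk_state_load(sk) == TCP_LISTEN;
}

The release on the writer side guarantees that anything initialised before the transition is visible to a reader that observes the new state through the acquire load.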
diff --git a/include/net/switchdev.h b/include/net/switchdev.h index bc865e244efe..1d22ce9f352e 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h | |||
| @@ -323,7 +323,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb, | |||
| 323 | struct net_device *filter_dev, | 323 | struct net_device *filter_dev, |
| 324 | int idx) | 324 | int idx) |
| 325 | { | 325 | { |
| 326 | return -EOPNOTSUPP; | 326 | return idx; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | static inline void switchdev_port_fwd_mark_set(struct net_device *dev, | 329 | static inline void switchdev_port_fwd_mark_set(struct net_device *dev, |
diff --git a/include/net/vxlan.h b/include/net/vxlan.h index c1c899c3a51b..e289ada6adf6 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h | |||
| @@ -79,7 +79,7 @@ struct vxlanhdr { | |||
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | /* VXLAN header flags. */ | 81 | /* VXLAN header flags. */ |
| 82 | #define VXLAN_HF_RCO BIT(24) | 82 | #define VXLAN_HF_RCO BIT(21) |
| 83 | #define VXLAN_HF_VNI BIT(27) | 83 | #define VXLAN_HF_VNI BIT(27) |
| 84 | #define VXLAN_HF_GBP BIT(31) | 84 | #define VXLAN_HF_GBP BIT(31) |
| 85 | 85 | ||
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 4a9c21f9b4ea..d6f6e5006ee9 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
| @@ -548,6 +548,7 @@ struct xfrm_policy { | |||
| 548 | u16 family; | 548 | u16 family; |
| 549 | struct xfrm_sec_ctx *security; | 549 | struct xfrm_sec_ctx *security; |
| 550 | struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; | 550 | struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; |
| 551 | struct rcu_head rcu; | ||
| 551 | }; | 552 | }; |
| 552 | 553 | ||
| 553 | static inline struct net *xp_net(const struct xfrm_policy *xp) | 554 | static inline struct net *xp_net(const struct xfrm_policy *xp) |
| @@ -1141,12 +1142,14 @@ static inline int xfrm6_route_forward(struct sk_buff *skb) | |||
| 1141 | return xfrm_route_forward(skb, AF_INET6); | 1142 | return xfrm_route_forward(skb, AF_INET6); |
| 1142 | } | 1143 | } |
| 1143 | 1144 | ||
| 1144 | int __xfrm_sk_clone_policy(struct sock *sk); | 1145 | int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk); |
| 1145 | 1146 | ||
| 1146 | static inline int xfrm_sk_clone_policy(struct sock *sk) | 1147 | static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) |
| 1147 | { | 1148 | { |
| 1148 | if (unlikely(sk->sk_policy[0] || sk->sk_policy[1])) | 1149 | sk->sk_policy[0] = NULL; |
| 1149 | return __xfrm_sk_clone_policy(sk); | 1150 | sk->sk_policy[1] = NULL; |
| 1151 | if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) | ||
| 1152 | return __xfrm_sk_clone_policy(sk, osk); | ||
| 1150 | return 0; | 1153 | return 0; |
| 1151 | } | 1154 | } |
| 1152 | 1155 | ||
| @@ -1154,12 +1157,16 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir); | |||
| 1154 | 1157 | ||
| 1155 | static inline void xfrm_sk_free_policy(struct sock *sk) | 1158 | static inline void xfrm_sk_free_policy(struct sock *sk) |
| 1156 | { | 1159 | { |
| 1157 | if (unlikely(sk->sk_policy[0] != NULL)) { | 1160 | struct xfrm_policy *pol; |
| 1158 | xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX); | 1161 | |
| 1162 | pol = rcu_dereference_protected(sk->sk_policy[0], 1); | ||
| 1163 | if (unlikely(pol != NULL)) { | ||
| 1164 | xfrm_policy_delete(pol, XFRM_POLICY_MAX); | ||
| 1159 | sk->sk_policy[0] = NULL; | 1165 | sk->sk_policy[0] = NULL; |
| 1160 | } | 1166 | } |
| 1161 | if (unlikely(sk->sk_policy[1] != NULL)) { | 1167 | pol = rcu_dereference_protected(sk->sk_policy[1], 1); |
| 1162 | xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1); | 1168 | if (unlikely(pol != NULL)) { |
| 1169 | xfrm_policy_delete(pol, XFRM_POLICY_MAX+1); | ||
| 1163 | sk->sk_policy[1] = NULL; | 1170 | sk->sk_policy[1] = NULL; |
| 1164 | } | 1171 | } |
| 1165 | } | 1172 | } |
| @@ -1169,7 +1176,7 @@ void xfrm_garbage_collect(struct net *net); | |||
| 1169 | #else | 1176 | #else |
| 1170 | 1177 | ||
| 1171 | static inline void xfrm_sk_free_policy(struct sock *sk) {} | 1178 | static inline void xfrm_sk_free_policy(struct sock *sk) {} |
| 1172 | static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; } | 1179 | static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; } |
| 1173 | static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } | 1180 | static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } |
| 1174 | static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } | 1181 | static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } |
| 1175 | static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) | 1182 | static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) |
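sk_policy[] becomes __rcu and struct xfrm_policy gains an rcu_head; xfrm_sk_clone_policy() now receives the parent socket and first resets the child's pointers, so a failed clone can no longer leave the child aliasing the parent's policies. A sketch of a socket-clone path using the updated signature; demo_clone_sock is illustrative, not kernel code:

#include <net/xfrm.h>

static int demo_clone_sock(struct sock *newsk, const struct sock *osk)
{
        int err = xfrm_sk_clone_policy(newsk, osk);

        if (err)
                return err;     /* newsk->sk_policy[] stay NULL on failure */

        /* ... continue initialising the child socket ... */
        return 0;
}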
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 188df91d5851..ec9b44dd3d80 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h | |||
| @@ -237,6 +237,8 @@ struct ib_vendor_mad { | |||
| 237 | u8 data[IB_MGMT_VENDOR_DATA]; | 237 | u8 data[IB_MGMT_VENDOR_DATA]; |
| 238 | }; | 238 | }; |
| 239 | 239 | ||
| 240 | #define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001) | ||
| 241 | |||
| 240 | struct ib_class_port_info { | 242 | struct ib_class_port_info { |
| 241 | u8 base_version; | 243 | u8 base_version; |
| 242 | u8 class_version; | 244 | u8 class_version; |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9a68a19532ba..120da1d7f57e 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -1271,6 +1271,7 @@ struct ib_uobject { | |||
| 1271 | int id; /* index into kernel idr */ | 1271 | int id; /* index into kernel idr */ |
| 1272 | struct kref ref; | 1272 | struct kref ref; |
| 1273 | struct rw_semaphore mutex; /* protects .live */ | 1273 | struct rw_semaphore mutex; /* protects .live */ |
| 1274 | struct rcu_head rcu; /* kfree_rcu() overhead */ | ||
| 1274 | int live; | 1275 | int live; |
| 1275 | }; | 1276 | }; |
| 1276 | 1277 | ||
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index ed527121031d..fcfa3d7f5e7e 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
| @@ -668,6 +668,9 @@ struct Scsi_Host { | |||
| 668 | unsigned use_blk_mq:1; | 668 | unsigned use_blk_mq:1; |
| 669 | unsigned use_cmd_list:1; | 669 | unsigned use_cmd_list:1; |
| 670 | 670 | ||
| 671 | /* Host responded with short (<36 bytes) INQUIRY result */ | ||
| 672 | unsigned short_inquiry:1; | ||
| 673 | |||
| 671 | /* | 674 | /* |
| 672 | * Optional work queue to be utilized by the transport | 675 | * Optional work queue to be utilized by the transport |
| 673 | */ | 676 | */ |
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h index 2ae8812d7b1a..94dc6a9772e0 100644 --- a/include/sound/hda_register.h +++ b/include/sound/hda_register.h | |||
| @@ -93,6 +93,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; | |||
| 93 | #define AZX_REG_HSW_EM4 0x100c | 93 | #define AZX_REG_HSW_EM4 0x100c |
| 94 | #define AZX_REG_HSW_EM5 0x1010 | 94 | #define AZX_REG_HSW_EM5 0x1010 |
| 95 | 95 | ||
| 96 | /* Skylake/Broxton display HD-A controller Extended Mode registers */ | ||
| 97 | #define AZX_REG_SKL_EM4L 0x1040 | ||
| 98 | |||
| 96 | /* PCI space */ | 99 | /* PCI space */ |
| 97 | #define AZX_PCIREG_TCSEL 0x44 | 100 | #define AZX_PCIREG_TCSEL 0x44 |
| 98 | 101 | ||
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 7855cfe46b69..95a937eafb79 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
| @@ -398,6 +398,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm, | |||
| 398 | int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, | 398 | int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, |
| 399 | const struct snd_soc_dapm_route *route, int num); | 399 | const struct snd_soc_dapm_route *route, int num); |
| 400 | void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w); | 400 | void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w); |
| 401 | void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm); | ||
| 401 | 402 | ||
| 402 | /* dapm events */ | 403 | /* dapm events */ |
| 403 | void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, | 404 | void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 0a2c74008e53..aabf0aca0171 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
| @@ -474,7 +474,7 @@ struct se_cmd { | |||
| 474 | struct completion cmd_wait_comp; | 474 | struct completion cmd_wait_comp; |
| 475 | const struct target_core_fabric_ops *se_tfo; | 475 | const struct target_core_fabric_ops *se_tfo; |
| 476 | sense_reason_t (*execute_cmd)(struct se_cmd *); | 476 | sense_reason_t (*execute_cmd)(struct se_cmd *); |
| 477 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); | 477 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); |
| 478 | void *protocol_data; | 478 | void *protocol_data; |
| 479 | 479 | ||
| 480 | unsigned char *t_task_cdb; | 480 | unsigned char *t_task_cdb; |
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 628e6e64c2fb..c2e5d6cb34e3 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
| @@ -186,6 +186,7 @@ header-y += if_tunnel.h | |||
| 186 | header-y += if_vlan.h | 186 | header-y += if_vlan.h |
| 187 | header-y += if_x25.h | 187 | header-y += if_x25.h |
| 188 | header-y += igmp.h | 188 | header-y += igmp.h |
| 189 | header-y += ila.h | ||
| 189 | header-y += in6.h | 190 | header-y += in6.h |
| 190 | header-y += inet_diag.h | 191 | header-y += inet_diag.h |
| 191 | header-y += in.h | 192 | header-y += in.h |
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index 654bae3f1a38..5e6296160361 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h | |||
| @@ -33,17 +33,6 @@ | |||
| 33 | 33 | ||
| 34 | #define NFS_PIPE_DIRNAME "nfs" | 34 | #define NFS_PIPE_DIRNAME "nfs" |
| 35 | 35 | ||
| 36 | /* NFS ioctls */ | ||
| 37 | /* Let's follow btrfs lead on CLONE to avoid messing userspace */ | ||
| 38 | #define NFS_IOC_CLONE _IOW(0x94, 9, int) | ||
| 39 | #define NFS_IOC_CLONE_RANGE _IOW(0x94, 13, int) | ||
| 40 | |||
| 41 | struct nfs_ioctl_clone_range_args { | ||
| 42 | __s64 src_fd; | ||
| 43 | __u64 src_off, count; | ||
| 44 | __u64 dst_off; | ||
| 45 | }; | ||
| 46 | |||
| 47 | /* | 36 | /* |
| 48 | * NFS stats. The good thing with these values is that NFSv3 errors are | 37 | * NFS stats. The good thing with these values is that NFSv3 errors are |
| 49 | * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which | 38 | * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which |
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 28ccedd000f5..a27222d5b413 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h | |||
| @@ -628,7 +628,7 @@ struct ovs_action_hash { | |||
| 628 | * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the | 628 | * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the |
| 629 | * mask, the corresponding bit in the value is copied to the connection | 629 | * mask, the corresponding bit in the value is copied to the connection |
| 630 | * tracking mark field in the connection. | 630 | * tracking mark field in the connection. |
| 631 | * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN | 631 | * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN |
| 632 | * mask. For each bit set in the mask, the corresponding bit in the value is | 632 | * mask. For each bit set in the mask, the corresponding bit in the value is |
| 633 | * copied to the connection tracking label field in the connection. | 633 | * copied to the connection tracking label field in the connection. |
| 634 | * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. | 634 | * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. |
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 751b69f858c8..9fd7b5d8df2f 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h | |||
| @@ -39,13 +39,6 @@ | |||
| 39 | #define VFIO_SPAPR_TCE_v2_IOMMU 7 | 39 | #define VFIO_SPAPR_TCE_v2_IOMMU 7 |
| 40 | 40 | ||
| 41 | /* | 41 | /* |
| 42 | * The No-IOMMU IOMMU offers no translation or isolation for devices and | ||
| 43 | * supports no ioctls outside of VFIO_CHECK_EXTENSION. Use of VFIO's No-IOMMU | ||
| 44 | * code will taint the host kernel and should be used with extreme caution. | ||
| 45 | */ | ||
| 46 | #define VFIO_NOIOMMU_IOMMU 8 | ||
| 47 | |||
| 48 | /* | ||
| 49 | * The IOCTL interface is designed for extensibility by embedding the | 42 | * The IOCTL interface is designed for extensibility by embedding the |
| 50 | * structure length (argsz) and flags into structures passed between | 43 | * structure length (argsz) and flags into structures passed between |
| 51 | * kernel and userspace. We therefore use the _IO() macro for these | 44 | * kernel and userspace. We therefore use the _IO() macro for these |
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 85dedca3dcfb..eeba75395f7d 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h | |||
| @@ -343,7 +343,6 @@ struct ipu_client_platformdata { | |||
| 343 | int di; | 343 | int di; |
| 344 | int dc; | 344 | int dc; |
| 345 | int dp; | 345 | int dp; |
| 346 | int dmfc; | ||
| 347 | int dma[2]; | 346 | int dma[2]; |
| 348 | }; | 347 | }; |
| 349 | 348 | ||
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 7d28aff605c7..7dc685b4057d 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h | |||
| @@ -181,6 +181,20 @@ struct __name##_back_ring { \ | |||
| 181 | #define RING_GET_REQUEST(_r, _idx) \ | 181 | #define RING_GET_REQUEST(_r, _idx) \ |
| 182 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) | 182 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) |
| 183 | 183 | ||
| 184 | /* | ||
| 185 | * Get a local copy of a request. | ||
| 186 | * | ||
| 187 | * Use this in preference to RING_GET_REQUEST() so all processing is | ||
| 188 | * done on a local copy that cannot be modified by the other end. | ||
| 189 | * | ||
| 190 | * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this | ||
| 191 | * to be ineffective where _req is a struct which consists of only bitfields. | ||
| 192 | */ | ||
| 193 | #define RING_COPY_REQUEST(_r, _idx, _req) do { \ | ||
| 194 | /* Use volatile to force the copy into _req. */ \ | ||
| 195 | *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ | ||
| 196 | } while (0) | ||
| 197 | |||
| 184 | #define RING_GET_RESPONSE(_r, _idx) \ | 198 | #define RING_GET_RESPONSE(_r, _idx) \ |
| 185 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) | 199 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) |
| 186 | 200 | ||
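RING_COPY_REQUEST() snapshots a request into backend-local memory, so a malicious or buggy frontend cannot rewrite fields between the backend's validation and its use of them, the double-fetch problem that pointers returned by RING_GET_REQUEST() are exposed to. A sketch of a consumer loop built on it; the demo_* types stand in for a real protocol's DEFINE_RING_TYPES() instantiation and are assumptions, not kernel code:

#include <xen/interface/io/ring.h>
#include <asm/barrier.h>

/* Illustrative request/response layout and ring instantiation; a real
 * backend would use its protocol's own types (blkif, netif, ...). */
struct demo_request  { uint32_t op; uint32_t len; uint64_t gref; };
struct demo_response { int32_t status; };
DEFINE_RING_TYPES(demo, struct demo_request, struct demo_response);

static void demo_consume(struct demo_back_ring *ring)
{
        struct demo_request req;        /* backend-local copy */
        RING_IDX rc = ring->req_cons;
        RING_IDX rp = ring->sring->req_prod;

        rmb();  /* read req_prod before the request slots themselves */

        while (rc != rp) {
                RING_COPY_REQUEST(ring, rc, &req);
                ring->req_cons = ++rc;
                /* validate and process 'req'; the frontend can no longer
                 * change the fields that were just checked */
        }
}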
