aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2018-05-18 00:08:53 -0400
committerDave Airlie <airlied@redhat.com>2018-05-18 00:08:53 -0400
commit1fafef9dfe127bdd4600eeaca302f0c1cb4ee5d0 (patch)
treef829e8a26fc768666eb5f827bf5817892ea7e46c /include/linux
parent315852b422972e6ebb1dfddaadada09e46a2681a (diff)
parent76ef6b28ea4f81c3d511866a9b31392caa833126 (diff)
Merge drm-fixes-for-v4.17-rc6-urgent into drm-next
Need to backmerge some nouveau fixes to reduce the nouveau -next conflicts a lot. Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bpf.h4
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/ceph/osd_client.h12
-rw-r--r--include/linux/clk-provider.h3
-rw-r--r--include/linux/genhd.h4
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/mlx5/driver.h12
-rw-r--r--include/linux/oom.h2
-rw-r--r--include/linux/rbtree_augmented.h1
-rw-r--r--include/linux/rbtree_latch.h1
-rw-r--r--include/linux/remoteproc.h2
-rw-r--r--include/linux/sched.h50
-rw-r--r--include/linux/sched/signal.h2
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/wait_bit.h17
15 files changed, 93 insertions, 21 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 486e65e3db26..469b20e1dd7e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -31,6 +31,7 @@ struct bpf_map_ops {
31 void (*map_release)(struct bpf_map *map, struct file *map_file); 31 void (*map_release)(struct bpf_map *map, struct file *map_file);
32 void (*map_free)(struct bpf_map *map); 32 void (*map_free)(struct bpf_map *map);
33 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 33 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
34 void (*map_release_uref)(struct bpf_map *map);
34 35
35 /* funcs callable from userspace and from eBPF programs */ 36 /* funcs callable from userspace and from eBPF programs */
36 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 37 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -351,6 +352,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
351 struct bpf_prog **_prog, *__prog; \ 352 struct bpf_prog **_prog, *__prog; \
352 struct bpf_prog_array *_array; \ 353 struct bpf_prog_array *_array; \
353 u32 _ret = 1; \ 354 u32 _ret = 1; \
355 preempt_disable(); \
354 rcu_read_lock(); \ 356 rcu_read_lock(); \
355 _array = rcu_dereference(array); \ 357 _array = rcu_dereference(array); \
356 if (unlikely(check_non_null && !_array))\ 358 if (unlikely(check_non_null && !_array))\
@@ -362,6 +364,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
362 } \ 364 } \
363_out: \ 365_out: \
364 rcu_read_unlock(); \ 366 rcu_read_unlock(); \
367 preempt_enable_no_resched(); \
365 _ret; \ 368 _ret; \
366 }) 369 })
367 370
@@ -434,7 +437,6 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
434int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 437int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
435 void *key, void *value, u64 map_flags); 438 void *key, void *value, u64 map_flags);
436int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 439int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
437void bpf_fd_array_map_clear(struct bpf_map *map);
438int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, 440int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
439 void *key, void *value, u64 map_flags); 441 void *key, void *value, u64 map_flags);
440int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 442int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index d3339dd48b1a..b324e01ccf2d 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -25,6 +25,7 @@
25#define PHY_ID_BCM54612E 0x03625e60 25#define PHY_ID_BCM54612E 0x03625e60
26#define PHY_ID_BCM54616S 0x03625d10 26#define PHY_ID_BCM54616S 0x03625d10
27#define PHY_ID_BCM57780 0x03625d90 27#define PHY_ID_BCM57780 0x03625d90
28#define PHY_ID_BCM89610 0x03625cd0
28 29
29#define PHY_ID_BCM7250 0xae025280 30#define PHY_ID_BCM7250 0xae025280
30#define PHY_ID_BCM7260 0xae025190 31#define PHY_ID_BCM7260 0xae025190
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 528ccc943cee..96bb32285989 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -77,7 +77,10 @@ struct ceph_osd_data {
77 u32 bio_length; 77 u32 bio_length;
78 }; 78 };
79#endif /* CONFIG_BLOCK */ 79#endif /* CONFIG_BLOCK */
80 struct ceph_bvec_iter bvec_pos; 80 struct {
81 struct ceph_bvec_iter bvec_pos;
82 u32 num_bvecs;
83 };
81 }; 84 };
82}; 85};
83 86
@@ -412,6 +415,10 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
412 struct ceph_bio_iter *bio_pos, 415 struct ceph_bio_iter *bio_pos,
413 u32 bio_length); 416 u32 bio_length);
414#endif /* CONFIG_BLOCK */ 417#endif /* CONFIG_BLOCK */
418void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
419 unsigned int which,
420 struct bio_vec *bvecs, u32 num_bvecs,
421 u32 bytes);
415void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, 422void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
416 unsigned int which, 423 unsigned int which,
417 struct ceph_bvec_iter *bvec_pos); 424 struct ceph_bvec_iter *bvec_pos);
@@ -426,7 +433,8 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
426 bool own_pages); 433 bool own_pages);
427void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, 434void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
428 unsigned int which, 435 unsigned int which,
429 struct bio_vec *bvecs, u32 bytes); 436 struct bio_vec *bvecs, u32 num_bvecs,
437 u32 bytes);
430extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, 438extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
431 unsigned int which, 439 unsigned int which,
432 struct page **pages, u64 length, 440 struct page **pages, u64 length,
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 210a890008f9..1d25e149c1c5 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -765,6 +765,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
765int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req); 765int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
766int __clk_mux_determine_rate_closest(struct clk_hw *hw, 766int __clk_mux_determine_rate_closest(struct clk_hw *hw,
767 struct clk_rate_request *req); 767 struct clk_rate_request *req);
768int clk_mux_determine_rate_flags(struct clk_hw *hw,
769 struct clk_rate_request *req,
770 unsigned long flags);
768void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); 771void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
769void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, 772void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
770 unsigned long max_rate); 773 unsigned long max_rate);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index c826b0b5232a..6cb8a5789668 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -368,7 +368,9 @@ static inline void free_part_stats(struct hd_struct *part)
368 part_stat_add(cpu, gendiskp, field, -subnd) 368 part_stat_add(cpu, gendiskp, field, -subnd)
369 369
370void part_in_flight(struct request_queue *q, struct hd_struct *part, 370void part_in_flight(struct request_queue *q, struct hd_struct *part,
371 unsigned int inflight[2]); 371 unsigned int inflight[2]);
372void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
373 unsigned int inflight[2]);
372void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, 374void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
373 int rw); 375 int rw);
374void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, 376void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..2803264c512f 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k);
62int kthread_park(struct task_struct *k); 62int kthread_park(struct task_struct *k);
63void kthread_unpark(struct task_struct *k); 63void kthread_unpark(struct task_struct *k);
64void kthread_parkme(void); 64void kthread_parkme(void);
65void kthread_park_complete(struct task_struct *k);
65 66
66int kthreadd(void *unused); 67int kthreadd(void *unused);
67extern struct task_struct *kthreadd_task; 68extern struct task_struct *kthreadd_task;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 767d193c269a..2a156c5dfadd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1284,25 +1284,19 @@ enum {
1284}; 1284};
1285 1285
1286static inline const struct cpumask * 1286static inline const struct cpumask *
1287mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) 1287mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
1288{ 1288{
1289 const struct cpumask *mask;
1290 struct irq_desc *desc; 1289 struct irq_desc *desc;
1291 unsigned int irq; 1290 unsigned int irq;
1292 int eqn; 1291 int eqn;
1293 int err; 1292 int err;
1294 1293
1295 err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); 1294 err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
1296 if (err) 1295 if (err)
1297 return NULL; 1296 return NULL;
1298 1297
1299 desc = irq_to_desc(irq); 1298 desc = irq_to_desc(irq);
1300#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 1299 return desc->affinity_hint;
1301 mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
1302#else
1303 mask = desc->irq_common_data.affinity;
1304#endif
1305 return mask;
1306} 1300}
1307 1301
1308#endif /* MLX5_DRIVER_H */ 1302#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5bad038ac012..6adac113e96d 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm)
95 return 0; 95 return 0;
96} 96}
97 97
98void __oom_reap_task_mm(struct mm_struct *mm);
99
98extern unsigned long oom_badness(struct task_struct *p, 100extern unsigned long oom_badness(struct task_struct *p,
99 struct mem_cgroup *memcg, const nodemask_t *nodemask, 101 struct mem_cgroup *memcg, const nodemask_t *nodemask,
100 unsigned long totalpages); 102 unsigned long totalpages);
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 6bfd2b581f75..af8a61be2d8d 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -26,6 +26,7 @@
26 26
27#include <linux/compiler.h> 27#include <linux/compiler.h>
28#include <linux/rbtree.h> 28#include <linux/rbtree.h>
29#include <linux/rcupdate.h>
29 30
30/* 31/*
31 * Please note - only struct rb_augment_callbacks and the prototypes for 32 * Please note - only struct rb_augment_callbacks and the prototypes for
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index ece43e882b56..7d012faa509a 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -35,6 +35,7 @@
35 35
36#include <linux/rbtree.h> 36#include <linux/rbtree.h>
37#include <linux/seqlock.h> 37#include <linux/seqlock.h>
38#include <linux/rcupdate.h>
38 39
39struct latch_tree_node { 40struct latch_tree_node {
40 struct rb_node node[2]; 41 struct rb_node node[2];
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index d09a9c7af109..dfdaede9139e 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -569,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
569void rproc_add_subdev(struct rproc *rproc, 569void rproc_add_subdev(struct rproc *rproc,
570 struct rproc_subdev *subdev, 570 struct rproc_subdev *subdev,
571 int (*probe)(struct rproc_subdev *subdev), 571 int (*probe)(struct rproc_subdev *subdev),
572 void (*remove)(struct rproc_subdev *subdev, bool graceful)); 572 void (*remove)(struct rproc_subdev *subdev, bool crashed));
573 573
574void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); 574void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
575 575
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b3d697f3b573..c2413703f45d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -112,17 +112,36 @@ struct task_group;
112 112
113#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 113#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
114 114
115/*
116 * Special states are those that do not use the normal wait-loop pattern. See
117 * the comment with set_special_state().
118 */
119#define is_special_task_state(state) \
120 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
121
115#define __set_current_state(state_value) \ 122#define __set_current_state(state_value) \
116 do { \ 123 do { \
124 WARN_ON_ONCE(is_special_task_state(state_value));\
117 current->task_state_change = _THIS_IP_; \ 125 current->task_state_change = _THIS_IP_; \
118 current->state = (state_value); \ 126 current->state = (state_value); \
119 } while (0) 127 } while (0)
128
120#define set_current_state(state_value) \ 129#define set_current_state(state_value) \
121 do { \ 130 do { \
131 WARN_ON_ONCE(is_special_task_state(state_value));\
122 current->task_state_change = _THIS_IP_; \ 132 current->task_state_change = _THIS_IP_; \
123 smp_store_mb(current->state, (state_value)); \ 133 smp_store_mb(current->state, (state_value)); \
124 } while (0) 134 } while (0)
125 135
136#define set_special_state(state_value) \
137 do { \
138 unsigned long flags; /* may shadow */ \
139 WARN_ON_ONCE(!is_special_task_state(state_value)); \
140 raw_spin_lock_irqsave(&current->pi_lock, flags); \
141 current->task_state_change = _THIS_IP_; \
142 current->state = (state_value); \
143 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
144 } while (0)
126#else 145#else
127/* 146/*
128 * set_current_state() includes a barrier so that the write of current->state 147 * set_current_state() includes a barrier so that the write of current->state
@@ -144,8 +163,8 @@ struct task_group;
144 * 163 *
145 * The above is typically ordered against the wakeup, which does: 164 * The above is typically ordered against the wakeup, which does:
146 * 165 *
147 * need_sleep = false; 166 * need_sleep = false;
148 * wake_up_state(p, TASK_UNINTERRUPTIBLE); 167 * wake_up_state(p, TASK_UNINTERRUPTIBLE);
149 * 168 *
150 * Where wake_up_state() (and all other wakeup primitives) imply enough 169 * Where wake_up_state() (and all other wakeup primitives) imply enough
151 * barriers to order the store of the variable against wakeup. 170 * barriers to order the store of the variable against wakeup.
@@ -154,12 +173,33 @@ struct task_group;
154 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a 173 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
155 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). 174 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
156 * 175 *
157 * This is obviously fine, since they both store the exact same value. 176 * However, with slightly different timing the wakeup TASK_RUNNING store can
 177 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
178 * a problem either because that will result in one extra go around the loop
179 * and our @cond test will save the day.
158 * 180 *
159 * Also see the comments of try_to_wake_up(). 181 * Also see the comments of try_to_wake_up().
160 */ 182 */
161#define __set_current_state(state_value) do { current->state = (state_value); } while (0) 183#define __set_current_state(state_value) \
162#define set_current_state(state_value) smp_store_mb(current->state, (state_value)) 184 current->state = (state_value)
185
186#define set_current_state(state_value) \
187 smp_store_mb(current->state, (state_value))
188
189/*
190 * set_special_state() should be used for those states when the blocking task
191 * can not use the regular condition based wait-loop. In that case we must
192 * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
193 * will not collide with our state change.
194 */
195#define set_special_state(state_value) \
196 do { \
197 unsigned long flags; /* may shadow */ \
198 raw_spin_lock_irqsave(&current->pi_lock, flags); \
199 current->state = (state_value); \
200 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
201 } while (0)
202
163#endif 203#endif
164 204
165/* Task command name length: */ 205/* Task command name length: */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index a7ce74c74e49..113d1ad1ced7 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
280{ 280{
281 spin_lock_irq(&current->sighand->siglock); 281 spin_lock_irq(&current->sighand->siglock);
282 if (current->jobctl & JOBCTL_STOP_DEQUEUED) 282 if (current->jobctl & JOBCTL_STOP_DEQUEUED)
283 __set_current_state(TASK_STOPPED); 283 set_special_state(TASK_STOPPED);
284 spin_unlock_irq(&current->sighand->siglock); 284 spin_unlock_irq(&current->sighand->siglock);
285 285
286 schedule(); 286 schedule();
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4b6b9283fa7b..8675e145ea8b 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -52,7 +52,7 @@
52#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ 52#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
53 53
54/* big enough to hold our biggest descriptor */ 54/* big enough to hold our biggest descriptor */
55#define USB_COMP_EP0_BUFSIZ 1024 55#define USB_COMP_EP0_BUFSIZ 4096
56 56
57/* OS feature descriptor length <= 4kB */ 57/* OS feature descriptor length <= 4kB */
58#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096 58#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 9318b2166439..2b0072fa5e92 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -305,4 +305,21 @@ do { \
305 __ret; \ 305 __ret; \
306}) 306})
307 307
308/**
309 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
310 *
311 * @bit: the bit of the word being waited on
312 * @word: the word being waited on, a kernel virtual address
313 *
314 * You can use this helper if bitflags are manipulated atomically rather than
315 * non-atomically under a lock.
316 */
317static inline void clear_and_wake_up_bit(int bit, void *word)
318{
319 clear_bit_unlock(bit, word);
320 /* See wake_up_bit() for which memory barrier you need to use. */
321 smp_mb__after_atomic();
322 wake_up_bit(word, bit);
323}
324
308#endif /* _LINUX_WAIT_BIT_H */ 325#endif /* _LINUX_WAIT_BIT_H */