 drivers/infiniband/Kconfig          | 1 +
 drivers/infiniband/hw/hfi1/rc.c     | 4 ----
 drivers/infiniband/hw/hfi1/ruc.c    | 1 -
 drivers/infiniband/hw/hfi1/sdma.c   | 1 -
 drivers/infiniband/hw/hfi1/uc.c     | 2 --
 drivers/infiniband/hw/hfi1/ud.c     | 2 --
 drivers/infiniband/hw/qib/qib_rc.c  | 3 ---
 drivers/infiniband/hw/qib/qib_ruc.c | 1 -
 drivers/infiniband/hw/qib/qib_uc.c  | 2 --
 drivers/infiniband/hw/qib/qib_ud.c  | 2 --
 drivers/infiniband/sw/rdmavt/qp.c   | 1 -
 11 files changed, 1 insertion(+), 19 deletions(-)
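
Background: smp_read_barrier_depends() orders a load against later loads whose address depends on it, and it expands to nothing on every architecture except DEC Alpha. This series therefore makes INFINIBAND depend on !ALPHA and deletes the now-dead barriers, leaving the READ_ONCE()-based checks seen in the hunks below. A minimal kernel-style sketch of the single-producer/single-consumer pattern every hunk touches; the demo_* names are hypothetical, and the producer's smp_wmb() is an assumption based on the "see post_one_send()" comments being removed:

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/types.h>	/* u32 */
#include <asm/barrier.h>	/* smp_wmb() */

#define DEMO_RING_SIZE 64	/* hypothetical; power of two */

struct demo_wqe;

struct demo_ring {
	struct demo_wqe *slots[DEMO_RING_SIZE];
	u32 head;		/* advanced by the producer (cf. qp->s_head) */
	u32 tail;		/* advanced by the consumer (cf. qp->s_last) */
};

/* Producer (the post_one_send() role): fill the slot, then publish it. */
static void demo_post(struct demo_ring *r, struct demo_wqe *wqe)
{
	r->slots[r->head & (DEMO_RING_SIZE - 1)] = wqe;
	smp_wmb();		/* order the slot store before the index store */
	WRITE_ONCE(r->head, r->head + 1);
}

/* Consumer (the send-engine role): sample head, then use new slots. */
static struct demo_wqe *demo_next(struct demo_ring *r)
{
	if (r->tail == READ_ONCE(r->head))
		return NULL;	/* ring empty */
	/*
	 * Before this series, smp_read_barrier_depends() sat here so that
	 * Alpha could not satisfy the dependent slots[] load with a stale
	 * value.  On every other architecture the barrier compiles to
	 * nothing, so once INFINIBAND depends on !ALPHA it is dead code.
	 */
	return r->slots[r->tail++ & (DEMO_RING_SIZE - 1)];
}
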
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 98ac46ed7214..3bb6e35b0bbf 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
 	depends on NET
 	depends on INET
 	depends on m || IPV6 != m
+	depends on !ALPHA
 	select IRQ_POLL
 	---help---
 	  Core support for InfiniBand (IB). Make sure to also select
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index fd01a760259f..f527bcda4650 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 		goto bail;
 	/* We are in the error state, flush the work request. */
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (qp->s_last == READ_ONCE(qp->s_head))
 		goto bail;
 	/* If DMAs are in progress, we can't flush immediately. */
@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 		newreq = 0;
 		if (qp->s_cur == qp->s_tail) {
 			/* Check if send work queue is empty. */
-			smp_read_barrier_depends(); /* see post_one_send() */
 			if (qp->s_tail == READ_ONCE(qp->s_head)) {
 				clear_ahg(qp);
 				goto bail;
@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
 	}
 
 	/* Ensure s_rdma_ack_cnt changes are committed */
-	smp_read_barrier_depends();
 	if (qp->s_rdma_ack_cnt) {
 		hfi1_queue_rc_ack(qp, is_fecn);
 		return;
@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
 	trace_hfi1_ack(qp, psn);
 
 	/* Ignore invalid responses. */
-	smp_read_barrier_depends(); /* see post_one_send */
 	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
 		goto ack_done;
 
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 2c7fc6e331ea..13b994738f41 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
 	sqp->s_flags |= RVT_S_BUSY;
 
 again:
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (sqp->s_last == READ_ONCE(sqp->s_head))
 		goto clr_busy;
 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 31c8f89b5fc8..61c130dbed10 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
 
 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
 {
-	smp_read_barrier_depends(); /* see sdma_update_tail() */
 	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
 }
 
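
Of all the deletions in this series, the one in get_txhead() is the only true address dependency: the tx_head index feeds straight into the tx_ring[] dereference, which is exactly the load-to-dependent-load ordering smp_read_barrier_depends() supplied on Alpha. A hedged sketch of the pairing it relied on; sdma_update_tail() is not part of this diff, so the producer column is an assumption:

/*
 * Assumed pairing (producer shape inferred, not shown in this diff):
 *
 *   submit path (cf. sdma_update_tail())   completion path (get_txhead())
 *   ------------------------------------   ------------------------------
 *   tx_ring[tail & mask] = tx;             head = sde->tx_head;
 *   smp_wmb();  <-- publish the slot       <-- barrier formerly here
 *   advance tail / ring doorbell           tx = tx_ring[head & mask];
 *
 * Only Alpha could let the dependent tx_ring[] read observe the slot's
 * pre-publication contents; with INFINIBAND now !ALPHA-only, the barrier
 * compiled to nothing on every configuration that can build this code.
 */
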
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 991bbee04821..132b63e787d1 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 		goto bail;
 	/* We are in the error state, flush the work request. */
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (qp->s_last == READ_ONCE(qp->s_head))
 		goto bail;
 	/* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 		       RVT_PROCESS_NEXT_SEND_OK))
 		goto bail;
 	/* Check if send work queue is empty. */
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (qp->s_cur == READ_ONCE(qp->s_head)) {
 		clear_ahg(qp);
 		goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index beb5091eccca..deb184574395 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 		goto bail;
 	/* We are in the error state, flush the work request. */
-	smp_read_barrier_depends(); /* see post_one_send */
 	if (qp->s_last == READ_ONCE(qp->s_head))
 		goto bail;
 	/* If DMAs are in progress, we can't flush immediately. */
@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	}
 
 	/* see post_one_send() */
-	smp_read_barrier_depends();
 	if (qp->s_cur == READ_ONCE(qp->s_head))
 		goto bail;
 
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8f5754fb8579..1a785c37ad0a 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 		goto bail;
 	/* We are in the error state, flush the work request. */
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (qp->s_last == READ_ONCE(qp->s_head))
 		goto bail;
 	/* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 		newreq = 0;
 		if (qp->s_cur == qp->s_tail) {
 			/* Check if send work queue is empty. */
-			smp_read_barrier_depends(); /* see post_one_send() */
 			if (qp->s_tail == READ_ONCE(qp->s_head))
 				goto bail;
 			/*
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 		goto ack_done;
 
 	/* Ignore invalid responses. */
-	smp_read_barrier_depends(); /* see post_one_send */
 	if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
 		goto ack_done;
 
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8..4662cc7bde92 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	sqp->s_flags |= RVT_S_BUSY;
 
 again:
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (sqp->s_last == READ_ONCE(sqp->s_head))
 		goto clr_busy;
 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37ace44..70c58b88192c 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 		goto bail;
 	/* We are in the error state, flush the work request. */
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (qp->s_last == READ_ONCE(qp->s_head))
 		goto bail;
 	/* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 		       RVT_PROCESS_NEXT_SEND_OK))
 		goto bail;
 	/* Check if send work queue is empty. */
-	smp_read_barrier_depends(); /* see post_one_send() */
 	if (qp->s_cur == READ_ONCE(qp->s_head))
 		goto bail;
 	/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed193ce..386c3c4da0c7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
 		goto bail;
 	/* We are in the error state, flush the work request. */
-	smp_read_barrier_depends(); /* see post_one_send */
 	if (qp->s_last == READ_ONCE(qp->s_head))
 		goto bail;
 	/* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 	}
 
 	/* see post_one_send() */
-	smp_read_barrier_depends();
 	if (qp->s_cur == READ_ONCE(qp->s_head))
 		goto bail;
 
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9177df60742a..eae84c216e2f 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
 	/* non-reserved operations */
 	if (likely(qp->s_avail))
 		return 0;
-	smp_read_barrier_depends(); /* see rc.c */
 	slast = READ_ONCE(qp->s_last);
 	if (qp->s_head >= slast)
 		avail = qp->s_size - (qp->s_head - slast);
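
The rvt_qp_is_avail() hunk also shows the occupancy arithmetic these rings share: entries in flight are (s_head - s_last) modulo s_size, and the remainder are available. A small sketch with a worked example; the wrapped-index branch is inferred, since the hunk ends before it:

/* Free-slot count for a circular queue of s_size entries (sketch; the
 * s_head < s_last branch is inferred from how such rings normally wrap). */
static u32 demo_ring_avail(u32 s_size, u32 s_head, u32 s_last)
{
	if (s_head >= s_last)
		return s_size - (s_head - s_last);
	return s_last - s_head;		/* indices have wrapped */
}

/* e.g. s_size = 16, s_head = 10, s_last = 4: six WQEs are in flight,
 * so demo_ring_avail() returns 10. */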