Diffstat (limited to 'net/smc')

 -rw-r--r--  net/smc/af_smc.c    | 16
 -rw-r--r--  net/smc/smc.h       |  2
 -rw-r--r--  net/smc/smc_clc.c   | 10
 -rw-r--r--  net/smc/smc_clc.h   |  3
 -rw-r--r--  net/smc/smc_close.c | 27
 -rw-r--r--  net/smc/smc_core.c  | 16
 -rw-r--r--  net/smc/smc_ib.c    |  1
 -rw-r--r--  net/smc/smc_pnet.c  |  4
 -rw-r--r--  net/smc/smc_rx.c    |  2
 -rw-r--r--  net/smc/smc_tx.c    | 12
 -rw-r--r--  net/smc/smc_wr.c    |  2

 11 files changed, 58 insertions, 37 deletions
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 8c6d24b2995d..745f145d4c4d 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -282,6 +282,7 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
 			 __be32 *subnet, u8 *prefix_len)
 {
 	struct dst_entry *dst = sk_dst_get(clcsock->sk);
+	struct in_device *in_dev;
 	struct sockaddr_in addr;
 	int rc = -ENOENT;
 	int len;
@@ -298,14 +299,17 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
 	/* get address to which the internal TCP socket is bound */
 	kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len);
 	/* analyze IPv4 specific data of net_device belonging to TCP socket */
-	for_ifa(dst->dev->ip_ptr) {
-		if (ifa->ifa_address != addr.sin_addr.s_addr)
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(dst->dev);
+	for_ifa(in_dev) {
+		if (!inet_ifa_match(addr.sin_addr.s_addr, ifa))
 			continue;
 		*prefix_len = inet_mask_len(ifa->ifa_mask);
 		*subnet = ifa->ifa_address & ifa->ifa_mask;
 		rc = 0;
 		break;
-	} endfor_ifa(dst->dev->ip_ptr);
+	} endfor_ifa(in_dev);
+	rcu_read_unlock();
 
 out_rel:
 	dst_release(dst);
@@ -509,7 +513,7 @@ decline_rdma:
 	/* RDMA setup failed, switch back to TCP */
 	smc->use_fallback = true;
 	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
-		rc = smc_clc_send_decline(smc, reason_code, 0);
+		rc = smc_clc_send_decline(smc, reason_code);
 		if (rc < sizeof(struct smc_clc_msg_decline))
 			goto out_err;
 	}
@@ -804,8 +808,6 @@ static void smc_listen_work(struct work_struct *work)
 		rc = local_contact;
 		if (rc == -ENOMEM)
 			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
-		else if (rc == -ENOLINK)
-			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
 		goto decline_rdma;
 	}
 	link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
@@ -899,7 +901,7 @@ decline_rdma:
 	smc_conn_free(&new_smc->conn);
 	new_smc->use_fallback = true;
 	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
-		rc = smc_clc_send_decline(new_smc, reason_code, 0);
+		rc = smc_clc_send_decline(new_smc, reason_code);
 		if (rc < sizeof(struct smc_clc_msg_decline))
 			goto out_err;
 	}
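Note (illustrative, not part of the commit): the first af_smc.c hunk above switches to an RCU-protected in_device lookup via __in_dev_get_rcu() and replaces the exact-equality test with inet_ifa_match(), which matches any address within the interface's subnet. A minimal self-contained sketch of that pattern follows; the helper name match_local_prefix() and its parameters are assumptions made for this example.

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>

/* sketch only: find the IPv4 prefix of @dev that covers @addr */
static int match_local_prefix(struct net_device *dev, __be32 addr,
			      __be32 *subnet, u8 *prefix_len)
{
	struct in_device *in_dev;
	int rc = -ENOENT;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);	/* NULL if dev has no IPv4 config */
	if (!in_dev)
		goto out;
	for_ifa(in_dev) {
		if (!inet_ifa_match(addr, ifa))
			continue;
		*prefix_len = inet_mask_len(ifa->ifa_mask);
		*subnet = ifa->ifa_address & ifa->ifa_mask;
		rc = 0;
		break;
	} endfor_ifa(in_dev);
out:
	rcu_read_unlock();
	return rc;
}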
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 6e44313e4467..0ccd6fa387ad 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -149,7 +149,7 @@ struct smc_connection {
 	atomic_t		sndbuf_space;	/* remaining space in sndbuf */
 	u16			tx_cdc_seq;	/* sequence # for CDC send */
 	spinlock_t		send_lock;	/* protect wr_sends */
-	struct work_struct	tx_work;	/* retry of smc_cdc_msg_send */
+	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */
 
 	struct smc_host_cdc_msg	local_rx_ctrl;	/* filled during event_handl.
 						 * .prod cf. TCP rcv_nxt
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 3934913ab835..b7dd2743fb5c 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -95,9 +95,10 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	}
 	if (clcm->type == SMC_CLC_DECLINE) {
 		reason_code = SMC_CLC_DECL_REPLY;
-		if (ntohl(((struct smc_clc_msg_decline *)buf)->peer_diagnosis)
-			== SMC_CLC_DECL_SYNCERR)
+		if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
 			smc->conn.lgr->sync_err = true;
+			smc_lgr_terminate(smc->conn.lgr);
+		}
 	}
 
 out:
@@ -105,8 +106,7 @@ out:
 }
 
 /* send CLC DECLINE message across internal TCP socket */
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
-			 u8 out_of_sync)
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 {
 	struct smc_clc_msg_decline dclc;
 	struct msghdr msg;
@@ -118,7 +118,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
 	dclc.hdr.type = SMC_CLC_DECLINE;
 	dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
 	dclc.hdr.version = SMC_CLC_V1;
-	dclc.hdr.flag = out_of_sync ? 1 : 0;
+	dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
 	memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
 	dclc.peer_diagnosis = htonl(peer_diag_info);
 	memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 13db8ce177c9..1c55414041d4 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -106,8 +106,7 @@ struct smc_ib_device;
 
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 		     u8 expected_type);
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
-			 u8 out_of_sync);
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
 int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev,
 			  u8 ibport);
 int smc_clc_send_confirm(struct smc_sock *smc);
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 3c2e166b5d22..f0d16fb825f7 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -174,15 +174,15 @@ int smc_close_active(struct smc_sock *smc)
 {
 	struct smc_cdc_conn_state_flags *txflags =
 		&smc->conn.local_tx_ctrl.conn_state_flags;
-	long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
 	struct smc_connection *conn = &smc->conn;
 	struct sock *sk = &smc->sk;
 	int old_state;
+	long timeout;
 	int rc = 0;
 
-	if (sock_flag(sk, SOCK_LINGER) &&
-	    !(current->flags & PF_EXITING))
-		timeout = sk->sk_lingertime;
+	timeout = current->flags & PF_EXITING ?
+		  0 : sock_flag(sk, SOCK_LINGER) ?
+		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
 
 again:
 	old_state = sk->sk_state;
@@ -208,7 +208,7 @@ again:
 	case SMC_ACTIVE:
 		smc_close_stream_wait(smc, timeout);
 		release_sock(sk);
-		cancel_work_sync(&conn->tx_work);
+		cancel_delayed_work_sync(&conn->tx_work);
 		lock_sock(sk);
 		if (sk->sk_state == SMC_ACTIVE) {
 			/* send close request */
@@ -234,7 +234,7 @@ again:
 		if (!smc_cdc_rxed_any_close(conn))
 			smc_close_stream_wait(smc, timeout);
 		release_sock(sk);
-		cancel_work_sync(&conn->tx_work);
+		cancel_delayed_work_sync(&conn->tx_work);
 		lock_sock(sk);
 		if (sk->sk_err != ECONNABORTED) {
 			/* confirm close from peer */
@@ -263,7 +263,9 @@ again:
 		/* peer sending PeerConnectionClosed will cause transition */
 		break;
 	case SMC_PROCESSABORT:
-		cancel_work_sync(&conn->tx_work);
+		release_sock(sk);
+		cancel_delayed_work_sync(&conn->tx_work);
+		lock_sock(sk);
 		smc_close_abort(conn);
 		sk->sk_state = SMC_CLOSED;
 		smc_close_wait_tx_pends(smc);
@@ -411,13 +413,14 @@ void smc_close_sock_put_work(struct work_struct *work)
 int smc_close_shutdown_write(struct smc_sock *smc)
 {
 	struct smc_connection *conn = &smc->conn;
-	long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
 	struct sock *sk = &smc->sk;
 	int old_state;
+	long timeout;
 	int rc = 0;
 
-	if (sock_flag(sk, SOCK_LINGER))
-		timeout = sk->sk_lingertime;
+	timeout = current->flags & PF_EXITING ?
+		  0 : sock_flag(sk, SOCK_LINGER) ?
+		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
 
 again:
 	old_state = sk->sk_state;
@@ -425,7 +428,7 @@ again:
 	case SMC_ACTIVE:
 		smc_close_stream_wait(smc, timeout);
 		release_sock(sk);
-		cancel_work_sync(&conn->tx_work);
+		cancel_delayed_work_sync(&conn->tx_work);
 		lock_sock(sk);
 		/* send close wr request */
 		rc = smc_close_wr(conn);
@@ -439,7 +442,7 @@ again:
 		if (!smc_cdc_rxed_any_close(conn))
 			smc_close_stream_wait(smc, timeout);
 		release_sock(sk);
-		cancel_work_sync(&conn->tx_work);
+		cancel_delayed_work_sync(&conn->tx_work);
 		lock_sock(sk);
 		/* confirm close from peer */
 		rc = smc_close_wr(conn);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 1a16d51e2330..20b66e79c5d6 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -25,8 +25,9 @@
 #include "smc_cdc.h"
 #include "smc_close.h"
 
 #define SMC_LGR_NUM_INCR	256
-#define SMC_LGR_FREE_DELAY	(600 * HZ)
+#define SMC_LGR_FREE_DELAY_SERV	(600 * HZ)
+#define SMC_LGR_FREE_DELAY_CLNT	(SMC_LGR_FREE_DELAY_SERV + 10)
 
 static u32 smc_lgr_num;			/* unique link group number */
 
@@ -107,8 +108,15 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
 		__smc_lgr_unregister_conn(conn);
 	}
 	write_unlock_bh(&lgr->conns_lock);
-	if (reduced && !lgr->conns_num)
-		schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY);
+	if (!reduced || lgr->conns_num)
+		return;
+	/* client link group creation always follows the server link group
+	 * creation. For client use a somewhat higher removal delay time,
+	 * otherwise there is a risk of out-of-sync link groups.
+	 */
+	mod_delayed_work(system_wq, &lgr->free_work,
+			 lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
+						 SMC_LGR_FREE_DELAY_SERV);
 }
 
 static void smc_lgr_free_work(struct work_struct *work)
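Note (illustrative, not part of the commit): the hunk above relies on mod_delayed_work(), which (re)arms the work's timer even when the work is already queued, whereas schedule_delayed_work() is a no-op while the work is pending. A hedged sketch of that usage with hypothetical demo_* names and delays mirroring the defines above:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DEMO_FREE_DELAY_SERV	(600 * HZ)
#define DEMO_FREE_DELAY_CLNT	(DEMO_FREE_DELAY_SERV + 10)

struct demo_lgr {
	bool is_client;			/* stand-in for lgr->role == SMC_CLNT */
	struct delayed_work free_work;
};

static void demo_schedule_free(struct demo_lgr *lgr)
{
	unsigned long delay = lgr->is_client ? DEMO_FREE_DELAY_CLNT
					     : DEMO_FREE_DELAY_SERV;

	/* re-arms the timer even if free_work is already pending */
	mod_delayed_work(system_wq, &lgr->free_work, delay);
}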
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 547e0e113b17..0b5852299158 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -380,6 +380,7 @@ static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
 	ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
 	if (ndev) {
 		memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN);
+		dev_put(ndev);
 	} else if (!rc) {
 		memcpy(&smcibdev->mac[ibport - 1][0],
 		       &smcibdev->gid[ibport - 1].raw[8], 3);
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 78f7af28ae4f..31f8453c25c5 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -181,8 +181,10 @@ static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem)
 			    sizeof(new_pnetelem->ndev->name)) ||
 		    smc_pnet_same_ibname(pnetelem,
 					 new_pnetelem->smcibdev->ibdev->name,
-					 new_pnetelem->ib_port))
+					 new_pnetelem->ib_port)) {
+			dev_put(pnetelem->ndev);
 			goto found;
+		}
 	}
 	list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist);
 	rc = 0;
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index b17a333e9bb0..3e631ae4b6b6 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -148,6 +148,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
 			read_done = sock_intr_errno(timeo);
 			break;
 		}
+		if (!timeo)
+			return -EAGAIN;
 	}
 
 	if (!atomic_read(&conn->bytes_to_rcv)) {
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 3c656beb8820..3866573288dd 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -24,6 +24,8 @@
 #include "smc_cdc.h"
 #include "smc_tx.h"
 
+#define SMC_TX_WORK_DELAY	HZ
+
 /***************************** sndbuf producer *******************************/
 
 /* callback implementation for sk.sk_write_space()
@@ -406,7 +408,8 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
 			goto out_unlock;
 		}
 		rc = 0;
-		schedule_work(&conn->tx_work);
+		schedule_delayed_work(&conn->tx_work,
+				      SMC_TX_WORK_DELAY);
 	}
 	goto out_unlock;
 }
@@ -430,7 +433,7 @@ out_unlock:
  */
 static void smc_tx_work(struct work_struct *work)
 {
-	struct smc_connection *conn = container_of(work,
+	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
@@ -468,7 +471,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
 		if (!rc)
 			rc = smc_cdc_msg_send(conn, wr_buf, pend);
 		if (rc < 0) {
-			schedule_work(&conn->tx_work);
+			schedule_delayed_work(&conn->tx_work,
+					      SMC_TX_WORK_DELAY);
 			return;
 		}
 		smc_curs_write(&conn->rx_curs_confirmed,
@@ -487,6 +491,6 @@ void smc_tx_consumer_update(struct smc_connection *conn)
 void smc_tx_init(struct smc_sock *smc)
 {
 	smc->sk.sk_write_space = smc_tx_write_space;
-	INIT_WORK(&smc->conn.tx_work, smc_tx_work);
+	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
 	spin_lock_init(&smc->conn.send_lock);
 }
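Note (illustrative, not part of the commit): the smc.h, smc_close.c and smc_tx.c changes together convert conn->tx_work from a plain work_struct to a delayed_work so the CDC send retry can be deferred by SMC_TX_WORK_DELAY. A minimal sketch of that conversion pattern, using hypothetical demo_* names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_conn {
	struct delayed_work tx_work;	/* was: struct work_struct tx_work */
};

static void demo_tx_work(struct work_struct *work)
{
	/* handler signature is unchanged; recover the container via
	 * to_delayed_work() instead of container_of(work, ...)
	 */
	struct demo_conn *conn = container_of(to_delayed_work(work),
					      struct demo_conn, tx_work);

	(void)conn;	/* transmit-retry logic would go here */
}

static void demo_conn_setup(struct demo_conn *conn)
{
	INIT_DELAYED_WORK(&conn->tx_work, demo_tx_work);
	schedule_delayed_work(&conn->tx_work, HZ);	/* retry in ~1s */
	cancel_delayed_work_sync(&conn->tx_work);	/* teardown path */
}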
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index ab56bda66783..525d91e0d57e 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -244,7 +244,7 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
 	int rc;
 
 	ib_req_notify_cq(link->smcibdev->roce_cq_send,
-			 IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS);
+			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
 	pend = container_of(priv, struct smc_wr_tx_pend, priv);
 	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
 			  &failed_wr);