Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')

 -rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c  | 128
 1 file changed, 84 insertions(+), 44 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 02436d5d0dab..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -173,12 +173,15 @@ static void start_ep_timer(struct c4iw_ep *ep)
 	add_timer(&ep->timer);
 }
 
-static void stop_ep_timer(struct c4iw_ep *ep)
+static int stop_ep_timer(struct c4iw_ep *ep)
 {
 	PDBG("%s ep %p stopping\n", __func__, ep);
 	del_timer_sync(&ep->timer);
-	if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
+	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
 		c4iw_put_ep(&ep->com);
+		return 0;
+	}
+	return 1;
 }
 
 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
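
The new stop_ep_timer() contract: it returns 0 when this caller cancelled the timer and dropped the timer's reference itself, and 1 when the TIMEOUT bit was already set, i.e. ep_timeout() fired first and the timeout path owns the teardown. A minimal userspace sketch of that test-and-set ownership handoff (illustrative names and types, not the kernel API):

	/*
	 * Userspace model of the stop_ep_timer()/ep_timeout() handoff:
	 * whichever side sets the flag first drops the timer's reference;
	 * the other side learns it lost the race. Hypothetical sketch.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	struct ep {
		atomic_flag timeout;	/* models test_and_set_bit(TIMEOUT, ...) */
		int refcnt;		/* models the ep reference count */
	};

	/* Returns 0 if we stopped the timer first, 1 if it already expired. */
	static int stop_ep_timer(struct ep *ep)
	{
		if (!atomic_flag_test_and_set(&ep->timeout)) {
			ep->refcnt--;	/* models c4iw_put_ep() */
			return 0;
		}
		return 1;		/* timeout path already owns the teardown */
	}

	int main(void)
	{
		struct ep ep = { .timeout = ATOMIC_FLAG_INIT, .refcnt = 2 };

		printf("first caller:  %d\n", stop_ep_timer(&ep));	/* 0 */
		printf("second caller: %d\n", stop_ep_timer(&ep));	/* 1 */
		printf("refcnt: %d\n", ep.refcnt);			/* dropped once */
		return 0;
	}
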
@@ -584,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN(1);
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
 	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -993,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	state_set(&ep->com, ABORTING);
+	__state_set(&ep->com, ABORTING);
 	set_bit(ABORT_CONN, &ep->com.history);
 	return send_abort(ep, skb, gfp);
 }
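
The state_set() to __state_set() switch follows the usual kernel convention where the double-underscore variant assumes the caller already holds the lock; abort_connection() is now only reached with ep->com.mutex held (see the process_timeout() hunk below). A hedged sketch of that convention with hypothetical types, not the driver's:

	#include <pthread.h>
	#include <stdio.h>

	enum ep_state { IDLE, ABORTING };

	struct ep_common {
		pthread_mutex_t mutex;
		enum ep_state state;
	};

	/* Double-underscore variant: caller must already hold epc->mutex. */
	static void __state_set(struct ep_common *epc, enum ep_state s)
	{
		epc->state = s;
	}

	/* Locking variant: takes the mutex itself; never call it under the lock. */
	static void state_set(struct ep_common *epc, enum ep_state s)
	{
		pthread_mutex_lock(&epc->mutex);
		__state_set(epc, s);
		pthread_mutex_unlock(&epc->mutex);
	}

	int main(void)
	{
		struct ep_common epc = {
			.mutex = PTHREAD_MUTEX_INITIALIZER,
			.state = IDLE,
		};

		pthread_mutex_lock(&epc.mutex);
		__state_set(&epc, ABORTING);	/* like abort_connection() now */
		pthread_mutex_unlock(&epc.mutex);

		state_set(&epc, IDLE);		/* for callers without the lock */
		printf("state: %d\n", epc.state);
		return 0;
	}
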
@@ -1151,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params *mpa_v2_params;
@@ -1161,17 +1168,17 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	enum c4iw_qp_attr_mask mask;
 	int err;
+	int disconnect = 0;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	/*
-	 * Stop mpa timer. If it expired, then the state has
-	 * changed and we bail since ep_timeout already aborted
-	 * the connection.
+	 * Stop mpa timer. If it expired, then
+	 * we ignore the MPA reply. process_timeout()
+	 * will abort the connection.
 	 */
-	stop_ep_timer(ep);
-	if (ep->com.state != MPA_REQ_SENT)
-		return;
+	if (stop_ep_timer(ep))
+		return 0;
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1193,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * if we don't even have the mpa message, then bail.
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
-		return;
+		return 0;
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/* Validate MPA header. */
@@ -1233,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * We'll continue process when more data arrives.
 	 */
 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-		return;
+		return 0;
 
 	if (mpa->flags & MPA_REJECT) {
 		err = -ECONNREFUSED;
@@ -1335,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 			attrs.layer_etype = LAYER_MPA | DDP_LLP;
 			attrs.ecode = MPA_NOMATCH_RTR;
 			attrs.next_state = C4IW_QP_STATE_TERMINATE;
+			attrs.send_term = 1;
 			err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-					C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 			err = -ENOMEM;
+			disconnect = 1;
 			goto out;
 		}
 
@@ -1353,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 			attrs.layer_etype = LAYER_MPA | DDP_LLP;
 			attrs.ecode = MPA_INSUFF_IRD;
 			attrs.next_state = C4IW_QP_STATE_TERMINATE;
+			attrs.send_term = 1;
 			err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-					C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 			err = -ENOMEM;
+			disconnect = 1;
 			goto out;
 		}
 		goto out;
@@ -1364,7 +1375,7 @@ err:
 	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
-	return;
+	return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1375,15 +1386,12 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
-	if (ep->com.state != MPA_REQ_WAIT)
-		return;
-
 	/*
 	 * If we get more than the supported amount of private data
 	 * then we must fail this connection.
 	 */
 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1413,13 +1421,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (mpa->revision > mpa_rev) {
 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
 		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
 
 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1430,7 +1438,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * Fail if there's too much private data.
 	 */
 	if (plen > MPA_MAX_PRIVATE_DATA) {
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1439,7 +1447,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * If plen does not account for pkt size
 	 */
 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1496,18 +1504,24 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	       ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
 	       ep->mpa_attr.p2p_type);
 
-	__state_set(&ep->com, MPA_REQ_RCVD);
-	stop_ep_timer(ep);
-
-	/* drive upcall */
-	mutex_lock(&ep->parent_ep->com.mutex);
-	if (ep->parent_ep->com.state != DEAD) {
-		if (connect_request_upcall(ep))
-			abort_connection(ep, skb, GFP_KERNEL);
-	} else {
-		abort_connection(ep, skb, GFP_KERNEL);
-	}
-	mutex_unlock(&ep->parent_ep->com.mutex);
+	/*
+	 * If the endpoint timer already expired, then we ignore
+	 * the start request. process_timeout() will abort
+	 * the connection.
+	 */
+	if (!stop_ep_timer(ep)) {
+		__state_set(&ep->com, MPA_REQ_RCVD);
+
+		/* drive upcall */
+		mutex_lock(&ep->parent_ep->com.mutex);
+		if (ep->parent_ep->com.state != DEAD) {
+			if (connect_request_upcall(ep))
+				abort_connection(ep, skb, GFP_KERNEL);
+		} else {
+			abort_connection(ep, skb, GFP_KERNEL);
+		}
+		mutex_unlock(&ep->parent_ep->com.mutex);
+	}
 	return;
 }
 
@@ -1519,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(hdr);
 	struct tid_info *t = dev->rdev.lldi.tids;
 	__u8 status = hdr->status;
+	int disconnect = 0;
 
 	ep = lookup_tid(t, tid);
 	if (!ep)
@@ -1534,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
 		ep->rcv_seq += dlen;
-		process_mpa_reply(ep, skb);
+		disconnect = process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
 		ep->rcv_seq += dlen;
@@ -1550,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 		       ep->com.state, ep->hwtid, status);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		disconnect = 1;
 		break;
 	}
 	default:
 		break;
 	}
 	mutex_unlock(&ep->com.mutex);
+	if (disconnect)
+		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	return 0;
 }
 
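
rx_data() now records whether a disconnect is needed while ep->com.mutex is held and only calls c4iw_ep_disconnect() after the unlock, presumably because the disconnect path takes the endpoint mutex again itself. A sketch of this decide-under-the-lock, act-after-unlock pattern (function names are illustrative, not the driver's):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t ep_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Runs under ep_mutex: only reports that teardown is needed. */
	static int process_reply(int bad_reply)
	{
		return bad_reply ? 1 : 0;
	}

	/* Takes ep_mutex itself, so it must not run while the lock is held. */
	static void ep_disconnect(void)
	{
		pthread_mutex_lock(&ep_mutex);
		printf("disconnecting\n");
		pthread_mutex_unlock(&ep_mutex);
	}

	static void rx_data(int bad_reply)
	{
		int disconnect;

		pthread_mutex_lock(&ep_mutex);
		disconnect = process_reply(bad_reply);
		pthread_mutex_unlock(&ep_mutex);

		if (disconnect)		/* safe: the lock is no longer held */
			ep_disconnect();
	}

	int main(void)
	{
		rx_data(1);
		return 0;
	}
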
@@ -2004,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		if (tcph->ece && tcph->cwr)
 			opt2 |= CCTRL_ECN(1);
 	}
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 
 	rpl = cplhdr(skb);
 	INIT_TP_WR(rpl, ep->hwtid);
@@ -2265,7 +2287,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		disconnect = 0;
 		break;
 	case MORIBUND:
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -2325,10 +2347,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	case CONNECTING:
 		break;
 	case MPA_REQ_WAIT:
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		break;
 	case MPA_REQ_SENT:
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
 			connect_reply_upcall(ep, -ECONNRESET);
 		else {
@@ -2433,7 +2455,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		__state_set(&ep->com, MORIBUND);
 		break;
 	case MORIBUND:
-		stop_ep_timer(ep);
+		(void)stop_ep_timer(ep);
 		if ((ep->com.cm_id) && (ep->com.qp)) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp,
@@ -3028,7 +3050,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
 		close = 1;
 		if (abrupt) {
-			stop_ep_timer(ep);
+			(void)stop_ep_timer(ep);
 			ep->com.state = ABORTING;
 		} else
 			ep->com.state = MORIBUND;
@@ -3462,14 +3484,24 @@ static void process_timeout(struct c4iw_ep *ep)
 		__state_set(&ep->com, ABORTING);
 		close_complete_upcall(ep, -ETIMEDOUT);
 		break;
+	case ABORTING:
+	case DEAD:
+
+		/*
+		 * These states are expected if the ep timed out at the same
+		 * time as another thread was calling stop_ep_timer().
+		 * So we silently do nothing for these states.
+		 */
+		abort = 0;
+		break;
 	default:
 		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
 			__func__, ep, ep->hwtid, ep->com.state);
 		abort = 0;
 	}
-	mutex_unlock(&ep->com.mutex);
 	if (abort)
 		abort_connection(ep, NULL, GFP_KERNEL);
+	mutex_unlock(&ep->com.mutex);
 	c4iw_put_ep(&ep->com);
 }
 
@@ -3483,6 +3515,8 @@ static void process_timedout_eps(void)
 
 		tmp = timeout_list.next;
 		list_del(tmp);
+		tmp->next = NULL;
+		tmp->prev = NULL;
 		spin_unlock_irq(&timeout_lock);
 		ep = list_entry(tmp, struct c4iw_ep, entry);
 		process_timeout(ep);
@@ -3499,6 +3533,7 @@ static void process_work(struct work_struct *work)
 	unsigned int opcode;
 	int ret;
 
+	process_timedout_eps();
 	while ((skb = skb_dequeue(&rxq))) {
 		rpl = cplhdr(skb);
 		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
@@ -3508,8 +3543,8 @@ static void process_work(struct work_struct *work)
 		ret = work_handlers[opcode](dev, skb);
 		if (!ret)
 			kfree_skb(skb);
+		process_timedout_eps();
 	}
-	process_timedout_eps();
 }
 
 static DECLARE_WORK(skb_work, process_work);
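
Moving process_timedout_eps() inside the loop (and adding a call before it) means timeouts queued while one CPL message was being handled are drained before the next message is dispatched, rather than only after the receive queue empties. A single-threaded toy model of that ordering (queues reduced to counters; not the driver code):

	#include <stdio.h>

	static int timeouts_pending;

	static void process_timedout_eps(void)
	{
		while (timeouts_pending) {
			printf("  handling timeout\n");
			timeouts_pending--;
		}
	}

	int main(void)
	{
		process_timedout_eps();		/* drain anything queued up front */
		for (int msg = 0; msg < 3; msg++) {
			printf("handling message %d\n", msg);
			if (msg == 1)
				timeouts_pending++;	/* handler arms a timeout */
			process_timedout_eps();		/* runs before message 2 */
		}
		return 0;
	}
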
@@ -3521,8 +3556,13 @@ static void ep_timeout(unsigned long arg)
 
 	spin_lock(&timeout_lock);
 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
-		list_add_tail(&ep->entry, &timeout_list);
-		kickit = 1;
+		/*
+		 * Only insert if it is not already on the list.
+		 */
+		if (!ep->entry.next) {
+			list_add_tail(&ep->entry, &timeout_list);
+			kickit = 1;
+		}
 	}
 	spin_unlock(&timeout_lock);
 	if (kickit)
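
Two pieces cooperate to keep an endpoint from being queued on timeout_list twice: process_timedout_eps() NULLs entry.next/entry.prev after list_del(), and ep_timeout() only enqueues when entry.next is NULL. A small standalone sketch of that membership-by-poisoning idiom, using a simplified doubly linked list rather than the kernel's list_head:

	#include <stddef.h>
	#include <stdio.h>

	struct entry {
		struct entry *next, *prev;
	};

	static struct entry timeout_list = { &timeout_list, &timeout_list };

	/* Enqueue only if the node is not already on the list. */
	static int add_tail_once(struct entry *e)
	{
		if (e->next)		/* non-NULL next => already queued */
			return 0;
		e->prev = timeout_list.prev;
		e->next = &timeout_list;
		timeout_list.prev->next = e;
		timeout_list.prev = e;
		return 1;
	}

	/* Unlink and poison the pointers so membership stays testable. */
	static void del_entry(struct entry *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		e->next = NULL;
		e->prev = NULL;
	}

	int main(void)
	{
		struct entry ep = { NULL, NULL };

		printf("first add:  %d\n", add_tail_once(&ep));	/* 1: inserted */
		printf("second add: %d\n", add_tail_once(&ep));	/* 0: already queued */
		del_entry(&ep);
		printf("after del:  %d\n", add_tail_once(&ep));	/* 1 again */
		return 0;
	}
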
