about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-05-16 14:52:30 -0400
committerDavid S. Miller <davem@davemloft.net>2018-05-16 14:52:30 -0400
commitfee8fb952d1e1c1a60e2017878a16cf83c92c154 (patch)
tree116104e8ed4b1c5938c3d837425d07d32ba50392
parent5a4931ae0193f8a4a97e8260fd0df1d705d83299 (diff)
parent490068deaef0c76e47bf89c457de899b7d3995c7 (diff)
Merge branch 'qed-LL2-fixes'
Michal Kalderon says:

====================
qed: LL2 fixes

This series fixes some issues in ll2 related to synchronization
and resource freeing.
====================

Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c61
1 files changed, 50 insertions, 11 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 38502815d681..468c59d2e491 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
292 struct qed_ll2_tx_packet *p_pkt = NULL; 292 struct qed_ll2_tx_packet *p_pkt = NULL;
293 struct qed_ll2_info *p_ll2_conn; 293 struct qed_ll2_info *p_ll2_conn;
294 struct qed_ll2_tx_queue *p_tx; 294 struct qed_ll2_tx_queue *p_tx;
295 unsigned long flags = 0;
295 dma_addr_t tx_frag; 296 dma_addr_t tx_frag;
296 297
297 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 298 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
300 301
301 p_tx = &p_ll2_conn->tx_queue; 302 p_tx = &p_ll2_conn->tx_queue;
302 303
304 spin_lock_irqsave(&p_tx->lock, flags);
303 while (!list_empty(&p_tx->active_descq)) { 305 while (!list_empty(&p_tx->active_descq)) {
304 p_pkt = list_first_entry(&p_tx->active_descq, 306 p_pkt = list_first_entry(&p_tx->active_descq,
305 struct qed_ll2_tx_packet, list_entry); 307 struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
309 list_del(&p_pkt->list_entry); 311 list_del(&p_pkt->list_entry);
310 b_last_packet = list_empty(&p_tx->active_descq); 312 b_last_packet = list_empty(&p_tx->active_descq);
311 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 313 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
314 spin_unlock_irqrestore(&p_tx->lock, flags);
312 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 315 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
313 struct qed_ooo_buffer *p_buffer; 316 struct qed_ooo_buffer *p_buffer;
314 317
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
328 b_last_frag, 331 b_last_frag,
329 b_last_packet); 332 b_last_packet);
330 } 333 }
334 spin_lock_irqsave(&p_tx->lock, flags);
331 } 335 }
336 spin_unlock_irqrestore(&p_tx->lock, flags);
332} 337}
333 338
334static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) 339static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
556 struct qed_ll2_info *p_ll2_conn = NULL; 561 struct qed_ll2_info *p_ll2_conn = NULL;
557 struct qed_ll2_rx_packet *p_pkt = NULL; 562 struct qed_ll2_rx_packet *p_pkt = NULL;
558 struct qed_ll2_rx_queue *p_rx; 563 struct qed_ll2_rx_queue *p_rx;
564 unsigned long flags = 0;
559 565
560 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 566 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
561 if (!p_ll2_conn) 567 if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
563 569
564 p_rx = &p_ll2_conn->rx_queue; 570 p_rx = &p_ll2_conn->rx_queue;
565 571
572 spin_lock_irqsave(&p_rx->lock, flags);
566 while (!list_empty(&p_rx->active_descq)) { 573 while (!list_empty(&p_rx->active_descq)) {
567 p_pkt = list_first_entry(&p_rx->active_descq, 574 p_pkt = list_first_entry(&p_rx->active_descq,
568 struct qed_ll2_rx_packet, list_entry); 575 struct qed_ll2_rx_packet, list_entry);
569 if (!p_pkt) 576 if (!p_pkt)
570 break; 577 break;
571
572 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); 578 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
579 spin_unlock_irqrestore(&p_rx->lock, flags);
573 580
574 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 581 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
575 struct qed_ooo_buffer *p_buffer; 582 struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
588 cookie, 595 cookie,
589 rx_buf_addr, b_last); 596 rx_buf_addr, b_last);
590 } 597 }
598 spin_lock_irqsave(&p_rx->lock, flags);
591 } 599 }
600 spin_unlock_irqrestore(&p_rx->lock, flags);
601}
602
603static bool
604qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
605 struct core_rx_slow_path_cqe *p_cqe)
606{
607 struct ooo_opaque *iscsi_ooo;
608 u32 cid;
609
610 if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
611 return false;
612
613 iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
614 if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
615 return false;
616
617 /* Need to make a flush */
618 cid = le32_to_cpu(iscsi_ooo->cid);
619 qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
620
621 return true;
592} 622}
593 623
594static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, 624static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
@@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
617 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 647 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
618 cqe_type = cqe->rx_cqe_sp.type; 648 cqe_type = cqe->rx_cqe_sp.type;
619 649
650 if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
651 if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
652 &cqe->rx_cqe_sp))
653 continue;
654
620 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { 655 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
621 DP_NOTICE(p_hwfn, 656 DP_NOTICE(p_hwfn,
622 "Got a non-regular LB LL2 completion [type 0x%02x]\n", 657 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
@@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
794 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; 829 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
795 int rc; 830 int rc;
796 831
832 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
833 return 0;
834
797 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); 835 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
798 if (rc) 836 if (rc)
799 return rc; 837 return rc;
@@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
814 u16 new_idx = 0, num_bds = 0; 852 u16 new_idx = 0, num_bds = 0;
815 int rc; 853 int rc;
816 854
855 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
856 return 0;
857
817 new_idx = le16_to_cpu(*p_tx->p_fw_cons); 858 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
818 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); 859 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
819 860
@@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
1867 1908
1868 /* Stop Tx & Rx of connection, if needed */ 1909 /* Stop Tx & Rx of connection, if needed */
1869 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { 1910 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1911 p_ll2_conn->tx_queue.b_cb_registred = false;
1912 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1870 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); 1913 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1871 if (rc) 1914 if (rc)
1872 goto out; 1915 goto out;
1916
1873 qed_ll2_txq_flush(p_hwfn, connection_handle); 1917 qed_ll2_txq_flush(p_hwfn, connection_handle);
1918 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1874 } 1919 }
1875 1920
1876 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { 1921 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1922 p_ll2_conn->rx_queue.b_cb_registred = false;
1923 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1877 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); 1924 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1878 if (rc) 1925 if (rc)
1879 goto out; 1926 goto out;
1927
1880 qed_ll2_rxq_flush(p_hwfn, connection_handle); 1928 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1929 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1881 } 1930 }
1882 1931
1883 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) 1932 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
@@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
1925 if (!p_ll2_conn) 1974 if (!p_ll2_conn)
1926 return; 1975 return;
1927 1976
1928 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1929 p_ll2_conn->rx_queue.b_cb_registred = false;
1930 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1931 }
1932
1933 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1934 p_ll2_conn->tx_queue.b_cb_registred = false;
1935 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1936 }
1937
1938 kfree(p_ll2_conn->tx_queue.descq_mem); 1977 kfree(p_ll2_conn->tx_queue.descq_mem);
1939 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); 1978 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1940 1979