about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/qlogic
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c69
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c8
11 files changed, 99 insertions, 48 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cc1b373c0ace..46dc93d3b9b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
147 "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n", 147 "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
148 fcoe_pf_params->num_cqs, 148 fcoe_pf_params->num_cqs,
149 p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); 149 p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
150 return -EINVAL; 150 rc = -EINVAL;
151 goto err;
151 } 152 }
152 153
153 p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); 154 p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
156 157
157 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); 158 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
158 if (rc) 159 if (rc)
159 return rc; 160 goto err;
160 161
161 cxt_info.iid = dummy_cid; 162 cxt_info.iid = dummy_cid;
162 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); 163 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
163 if (rc) { 164 if (rc) {
164 DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", 165 DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
165 dummy_cid); 166 dummy_cid);
166 return rc; 167 goto err;
167 } 168 }
168 p_cxt = cxt_info.p_cxt; 169 p_cxt = cxt_info.p_cxt;
169 SET_FIELD(p_cxt->tstorm_ag_context.flags3, 170 SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
240 rc = qed_spq_post(p_hwfn, p_ent, NULL); 241 rc = qed_spq_post(p_hwfn, p_ent, NULL);
241 242
242 return rc; 243 return rc;
244
245err:
246 qed_sp_destroy_request(p_hwfn, p_ent);
247 return rc;
243} 248}
244 249
245static int 250static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 1135387bd99d..4f8a685d1a55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
200 "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", 200 "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
201 p_params->num_queues, 201 p_params->num_queues,
202 p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); 202 p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
203 qed_sp_destroy_request(p_hwfn, p_ent);
203 return -EINVAL; 204 return -EINVAL;
204 } 205 }
205 206
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 82a1bd1f8a8c..67c02ea93906 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
740 740
741 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 741 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
742 if (rc) { 742 if (rc) {
743 /* Return spq entry which is taken in qed_sp_init_request()*/ 743 qed_sp_destroy_request(p_hwfn, p_ent);
744 qed_spq_return_entry(p_hwfn, p_ent);
745 return rc; 744 return rc;
746 } 745 }
747 746
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
1355 DP_NOTICE(p_hwfn, 1354 DP_NOTICE(p_hwfn,
1356 "%d is not supported yet\n", 1355 "%d is not supported yet\n",
1357 p_filter_cmd->opcode); 1356 p_filter_cmd->opcode);
1357 qed_sp_destroy_request(p_hwfn, *pp_ent);
1358 return -EINVAL; 1358 return -EINVAL;
1359 } 1359 }
1360 1360
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2056 } else { 2056 } else {
2057 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 2057 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2058 if (rc) 2058 if (rc)
2059 return rc; 2059 goto err;
2060 2060
2061 if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { 2061 if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2062 rc = qed_fw_l2_queue(p_hwfn, p_params->qid, 2062 rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2063 &abs_rx_q_id); 2063 &abs_rx_q_id);
2064 if (rc) 2064 if (rc)
2065 return rc; 2065 goto err;
2066 2066
2067 p_ramrod->rx_qid_valid = 1; 2067 p_ramrod->rx_qid_valid = 1;
2068 p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); 2068 p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2083 (u64)p_params->addr, p_params->length); 2083 (u64)p_params->addr, p_params->length);
2084 2084
2085 return qed_spq_post(p_hwfn, p_ent, NULL); 2085 return qed_spq_post(p_hwfn, p_ent, NULL);
2086
2087err:
2088 qed_sp_destroy_request(p_hwfn, p_ent);
2089 return rc;
2086} 2090}
2087 2091
2088int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, 2092int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f40f654398a0..a96364df4320 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
1944 struct qed_ptt *p_ptt, u32 *p_speed_mask) 1944 struct qed_ptt *p_ptt, u32 *p_speed_mask)
1945{ 1945{
1946 u32 transceiver_type, transceiver_state; 1946 u32 transceiver_type, transceiver_state;
1947 int ret;
1947 1948
1948 qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, 1949 ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
1949 &transceiver_type); 1950 &transceiver_type);
1951 if (ret)
1952 return ret;
1950 1953
1951 if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == 1954 if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
1952 false) 1955 false)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c71391b9c757..62113438c880 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
1514 default: 1514 default:
1515 rc = -EINVAL; 1515 rc = -EINVAL;
1516 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1516 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1517 qed_sp_destroy_request(p_hwfn, p_ent);
1517 return rc; 1518 return rc;
1518 } 1519 }
1519 SET_FIELD(p_ramrod->flags1, 1520 SET_FIELD(p_ramrod->flags1,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f9167d1354bb..e49fada85410 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
745 DP_NOTICE(p_hwfn, 745 DP_NOTICE(p_hwfn,
746 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", 746 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
747 rc); 747 rc);
748 qed_sp_destroy_request(p_hwfn, p_ent);
748 return rc; 749 return rc;
749 } 750 }
750 751
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index e95431f6acd4..3157c0d99441 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -167,6 +167,9 @@ struct qed_spq_entry {
167 enum spq_mode comp_mode; 167 enum spq_mode comp_mode;
168 struct qed_spq_comp_cb comp_cb; 168 struct qed_spq_comp_cb comp_cb;
169 struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ 169 struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
170
171 /* Posted entry for unlimited list entry in EBLOCK mode */
172 struct qed_spq_entry *post_ent;
170}; 173};
171 174
172struct qed_eq { 175struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
396 struct qed_spq_comp_cb *p_comp_data; 399 struct qed_spq_comp_cb *p_comp_data;
397}; 400};
398 401
402/**
403 * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
404 * Should be called on in error flows after initializing the SPQ entry
405 * and before posting it.
406 *
407 * @param p_hwfn
408 * @param p_ent
409 */
410void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
411 struct qed_spq_entry *p_ent);
412
399int qed_sp_init_request(struct qed_hwfn *p_hwfn, 413int qed_sp_init_request(struct qed_hwfn *p_hwfn,
400 struct qed_spq_entry **pp_ent, 414 struct qed_spq_entry **pp_ent,
401 u8 cmd, 415 u8 cmd,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 77b6248ad3b9..888274fa208b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -47,6 +47,19 @@
47#include "qed_sp.h" 47#include "qed_sp.h"
48#include "qed_sriov.h" 48#include "qed_sriov.h"
49 49
50void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
51 struct qed_spq_entry *p_ent)
52{
53 /* qed_spq_get_entry() can either get an entry from the free_pool,
54 * or, if no entries are left, allocate a new entry and add it to
55 * the unlimited_pending list.
56 */
57 if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
58 kfree(p_ent);
59 else
60 qed_spq_return_entry(p_hwfn, p_ent);
61}
62
50int qed_sp_init_request(struct qed_hwfn *p_hwfn, 63int qed_sp_init_request(struct qed_hwfn *p_hwfn,
51 struct qed_spq_entry **pp_ent, 64 struct qed_spq_entry **pp_ent,
52 u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) 65 u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
80 93
81 case QED_SPQ_MODE_BLOCK: 94 case QED_SPQ_MODE_BLOCK:
82 if (!p_data->p_comp_data) 95 if (!p_data->p_comp_data)
83 return -EINVAL; 96 goto err;
84 97
85 p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; 98 p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
86 break; 99 break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
95 default: 108 default:
96 DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", 109 DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
97 p_ent->comp_mode); 110 p_ent->comp_mode);
98 return -EINVAL; 111 goto err;
99 } 112 }
100 113
101 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, 114 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
109 memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); 122 memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
110 123
111 return 0; 124 return 0;
125
126err:
127 qed_sp_destroy_request(p_hwfn, p_ent);
128
129 return -EINVAL;
112} 130}
113 131
114static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) 132static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index c4a6274dd625..0a9c5bb0fa48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
142 142
143 DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); 143 DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
144 rc = qed_mcp_drain(p_hwfn, p_ptt); 144 rc = qed_mcp_drain(p_hwfn, p_ptt);
145 qed_ptt_release(p_hwfn, p_ptt);
145 if (rc) { 146 if (rc) {
146 DP_NOTICE(p_hwfn, "MCP drain failed\n"); 147 DP_NOTICE(p_hwfn, "MCP drain failed\n");
147 goto err; 148 goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
150 /* Retry after drain */ 151 /* Retry after drain */
151 rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); 152 rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
152 if (!rc) 153 if (!rc)
153 goto out; 154 return 0;
154 155
155 comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; 156 comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
156 if (comp_done->done == 1) 157 if (comp_done->done == 1) {
157 if (p_fw_ret) 158 if (p_fw_ret)
158 *p_fw_ret = comp_done->fw_return_code; 159 *p_fw_ret = comp_done->fw_return_code;
159out: 160 return 0;
160 qed_ptt_release(p_hwfn, p_ptt); 161 }
161 return 0;
162
163err: 162err:
164 qed_ptt_release(p_hwfn, p_ptt);
165 DP_NOTICE(p_hwfn, 163 DP_NOTICE(p_hwfn,
166 "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", 164 "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
167 le32_to_cpu(p_ent->elem.hdr.cid), 165 le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
685 /* EBLOCK responsible to free the allocated p_ent */ 683 /* EBLOCK responsible to free the allocated p_ent */
686 if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) 684 if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
687 kfree(p_ent); 685 kfree(p_ent);
686 else
687 p_ent->post_ent = p_en2;
688 688
689 p_ent = p_en2; 689 p_ent = p_en2;
690 } 690 }
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
767 SPQ_HIGH_PRI_RESERVE_DEFAULT); 767 SPQ_HIGH_PRI_RESERVE_DEFAULT);
768} 768}
769 769
770/* Avoid overriding of SPQ entries when getting out-of-order completions, by
771 * marking the completions in a bitmap and increasing the chain consumer only
772 * for the first successive completed entries.
773 */
774static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
775{
776 u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
777 struct qed_spq *p_spq = p_hwfn->p_spq;
778
779 __set_bit(pos, p_spq->p_comp_bitmap);
780 while (test_bit(p_spq->comp_bitmap_idx,
781 p_spq->p_comp_bitmap)) {
782 __clear_bit(p_spq->comp_bitmap_idx,
783 p_spq->p_comp_bitmap);
784 p_spq->comp_bitmap_idx++;
785 qed_chain_return_produced(&p_spq->chain);
786 }
787}
788
770int qed_spq_post(struct qed_hwfn *p_hwfn, 789int qed_spq_post(struct qed_hwfn *p_hwfn,
771 struct qed_spq_entry *p_ent, u8 *fw_return_code) 790 struct qed_spq_entry *p_ent, u8 *fw_return_code)
772{ 791{
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
824 p_ent->queue == &p_spq->unlimited_pending); 843 p_ent->queue == &p_spq->unlimited_pending);
825 844
826 if (p_ent->queue == &p_spq->unlimited_pending) { 845 if (p_ent->queue == &p_spq->unlimited_pending) {
827 /* This is an allocated p_ent which does not need to 846 struct qed_spq_entry *p_post_ent = p_ent->post_ent;
828 * return to pool. 847
829 */
830 kfree(p_ent); 848 kfree(p_ent);
831 return rc; 849
850 /* Return the entry which was actually posted */
851 p_ent = p_post_ent;
832 } 852 }
833 853
834 if (rc) 854 if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
842spq_post_fail2: 862spq_post_fail2:
843 spin_lock_bh(&p_spq->lock); 863 spin_lock_bh(&p_spq->lock);
844 list_del(&p_ent->list); 864 list_del(&p_ent->list);
845 qed_chain_return_produced(&p_spq->chain); 865 qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
846 866
847spq_post_fail: 867spq_post_fail:
848 /* return to the free pool */ 868 /* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
874 spin_lock_bh(&p_spq->lock); 894 spin_lock_bh(&p_spq->lock);
875 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { 895 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
876 if (p_ent->elem.hdr.echo == echo) { 896 if (p_ent->elem.hdr.echo == echo) {
877 u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
878
879 list_del(&p_ent->list); 897 list_del(&p_ent->list);
880 898 qed_spq_comp_bmap_update(p_hwfn, echo);
881 /* Avoid overriding of SPQ entries when getting
882 * out-of-order completions, by marking the completions
883 * in a bitmap and increasing the chain consumer only
884 * for the first successive completed entries.
885 */
886 __set_bit(pos, p_spq->p_comp_bitmap);
887
888 while (test_bit(p_spq->comp_bitmap_idx,
889 p_spq->p_comp_bitmap)) {
890 __clear_bit(p_spq->comp_bitmap_idx,
891 p_spq->p_comp_bitmap);
892 p_spq->comp_bitmap_idx++;
893 qed_chain_return_produced(&p_spq->chain);
894 }
895
896 p_spq->comp_count++; 899 p_spq->comp_count++;
897 found = p_ent; 900 found = p_ent;
898 break; 901 break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
931 QED_MSG_SPQ, 934 QED_MSG_SPQ,
932 "Got a completion without a callback function\n"); 935 "Got a completion without a callback function\n");
933 936
934 if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || 937 if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
935 (found->queue == &p_spq->unlimited_pending))
936 /* EBLOCK is responsible for returning its own entry into the 938 /* EBLOCK is responsible for returning its own entry into the
937 * free list, unless it originally added the entry into the 939 * free list.
938 * unlimited pending list.
939 */ 940 */
940 qed_spq_return_entry(p_hwfn, found); 941 qed_spq_return_entry(p_hwfn, found);
941 942
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9b08a9d9e151..ca6290fa0f30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
101 default: 101 default:
102 DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", 102 DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
103 p_hwfn->hw_info.personality); 103 p_hwfn->hw_info.personality);
104 qed_sp_destroy_request(p_hwfn, p_ent);
104 return -EINVAL; 105 return -EINVAL;
105 } 106 }
106 107
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 9647578cbe6a..14f26bf3b388 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
459 struct cmd_desc_type0 *first_desc, struct sk_buff *skb, 459 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
460 struct qlcnic_host_tx_ring *tx_ring) 460 struct qlcnic_host_tx_ring *tx_ring)
461{ 461{
462 u8 l4proto, opcode = 0, hdr_len = 0; 462 u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
463 u16 flags = 0, vlan_tci = 0; 463 u16 flags = 0, vlan_tci = 0;
464 int copied, offset, copy_len, size; 464 int copied, offset, copy_len, size;
465 struct cmd_desc_type0 *hwdesc; 465 struct cmd_desc_type0 *hwdesc;
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
472 flags = QLCNIC_FLAGS_VLAN_TAGGED; 472 flags = QLCNIC_FLAGS_VLAN_TAGGED;
473 vlan_tci = ntohs(vh->h_vlan_TCI); 473 vlan_tci = ntohs(vh->h_vlan_TCI);
474 protocol = ntohs(vh->h_vlan_encapsulated_proto); 474 protocol = ntohs(vh->h_vlan_encapsulated_proto);
475 tag_vlan = 1;
475 } else if (skb_vlan_tag_present(skb)) { 476 } else if (skb_vlan_tag_present(skb)) {
476 flags = QLCNIC_FLAGS_VLAN_OOB; 477 flags = QLCNIC_FLAGS_VLAN_OOB;
477 vlan_tci = skb_vlan_tag_get(skb); 478 vlan_tci = skb_vlan_tag_get(skb);
479 tag_vlan = 1;
478 } 480 }
479 if (unlikely(adapter->tx_pvid)) { 481 if (unlikely(adapter->tx_pvid)) {
480 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) 482 if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
481 return -EIO; 483 return -EIO;
482 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) 484 if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
483 goto set_flags; 485 goto set_flags;
484 486
485 flags = QLCNIC_FLAGS_VLAN_OOB; 487 flags = QLCNIC_FLAGS_VLAN_OOB;