 drivers/net/ethernet/qlogic/qed/qed_sp.h  |  3 +
 drivers/net/ethernet/qlogic/qed/qed_spq.c | 57 ++++++++++++-------------
 2 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index e95431f6acd4..04259df8a5c2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -167,6 +167,9 @@ struct qed_spq_entry {
 	enum spq_mode			comp_mode;
 	struct qed_spq_comp_cb		comp_cb;
 	struct qed_spq_comp_done	comp_done; /* SPQ_MODE_EBLOCK */
+
+	/* Posted entry for unlimited list entry in EBLOCK mode */
+	struct qed_spq_entry		*post_ent;
 };
 
 struct qed_eq {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index c4a6274dd625..c1a81ec0524b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -685,6 +685,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 		/* EBLOCK responsible to free the allocated p_ent */
 		if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 			kfree(p_ent);
+		else
+			p_ent->post_ent = p_en2;
 
 		p_ent = p_en2;
 	}
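
The hunk above is one half of the fix: when qed_spq_add_entry() spills into unlimited_pending, the caller-allocated entry is copied into a pool-backed one (p_en2) and only the copy is posted, so a blocking EBLOCK caller needs a pointer to that copy. A minimal compileable sketch of the hand-off, with hypothetical names (struct entry, add_entry) standing in for the driver types:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct qed_spq_entry */
    struct entry {
            bool eblock;            /* ~ comp_mode == QED_SPQ_MODE_EBLOCK */
            struct entry *post_ent; /* the copy that was actually posted */
    };

    /* Mirrors the hunk above: copy the caller's entry into a pool-backed
     * one, free the original unless a blocked caller still holds it, and
     * in that case record which copy ended up on the ring.
     */
    static struct entry *add_entry(struct entry *p_ent, struct entry *p_en2)
    {
            *p_en2 = *p_ent;
            if (!p_ent->eblock)
                    free(p_ent);
            else
                    p_ent->post_ent = p_en2;
            return p_en2;
    }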
@@ -767,6 +769,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 }
 
+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+
+	__set_bit(pos, p_spq->p_comp_bitmap);
+	while (test_bit(p_spq->comp_bitmap_idx,
+			p_spq->p_comp_bitmap)) {
+		__clear_bit(p_spq->comp_bitmap_idx,
+			    p_spq->p_comp_bitmap);
+		p_spq->comp_bitmap_idx++;
+		qed_chain_return_produced(&p_spq->chain);
+	}
+}
+
 int qed_spq_post(struct qed_hwfn *p_hwfn,
 		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 {
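
The new helper consolidates the out-of-order completion handling that previously lived inline in qed_spq_completion() (see the hunk removing it below): each completed position is marked in a bitmap, but the chain consumer only advances across the leading run of contiguous completed bits, so an early completion can never release a ring slot still occupied by an older, outstanding entry. A runnable sketch of the same technique under hypothetical names (RING_SIZE, struct demo_ring and demo_complete are not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8

    struct demo_ring {
            bool done[RING_SIZE];   /* stands in for p_comp_bitmap */
            unsigned int cons;      /* stands in for comp_bitmap_idx */
    };

    static void demo_complete(struct demo_ring *r, unsigned int echo)
    {
            r->done[echo % RING_SIZE] = true;

            /* Advance only across consecutive completed positions */
            while (r->done[r->cons % RING_SIZE]) {
                    r->done[r->cons % RING_SIZE] = false;
                    r->cons++; /* qed_chain_return_produced() goes here */
            }
    }

    int main(void)
    {
            struct demo_ring r = { 0 };

            demo_complete(&r, 2);   /* out of order: cons stays at 0 */
            demo_complete(&r, 0);   /* cons advances to 1 */
            demo_complete(&r, 1);   /* gap filled: cons jumps to 3 */
            printf("consumer = %u\n", r.cons);
            return 0;
    }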
@@ -824,11 +845,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 			   p_ent->queue == &p_spq->unlimited_pending);
 
 		if (p_ent->queue == &p_spq->unlimited_pending) {
-			/* This is an allocated p_ent which does not need to
-			 * return to pool.
-			 */
+			struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
 			kfree(p_ent);
-			return rc;
+
+			/* Return the entry which was actually posted */
+			p_ent = p_post_ent;
 		}
 
 		if (rc)
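
This is the other half of the fix: instead of freeing the caller-allocated entry and returning early, qed_spq_post() now continues with the pool-backed copy recorded by qed_spq_add_entry(), so the EBLOCK wait-and-cleanup logic later in the function operates on the entry that actually sits on the ring.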
@@ -842,7 +864,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 spq_post_fail2:
 	spin_lock_bh(&p_spq->lock);
 	list_del(&p_ent->list);
-	qed_chain_return_produced(&p_spq->chain);
+	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 
 spq_post_fail:
 	/* return to the free pool */
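
Note that the failure path now runs the same bitmap bookkeeping as the completion path. Returning the element directly with qed_chain_return_produced(), as the old code did, would leave the failed entry's ring position unmarked, and the in-order walk in qed_spq_comp_bmap_update() would stall on that position, stranding the ring slots of every later completion.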
@@ -874,25 +896,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	spin_lock_bh(&p_spq->lock);
 	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 		if (p_ent->elem.hdr.echo == echo) {
-			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
 			list_del(&p_ent->list);
-
-			/* Avoid overriding of SPQ entries when getting
-			 * out-of-order completions, by marking the completions
-			 * in a bitmap and increasing the chain consumer only
-			 * for the first successive completed entries.
-			 */
-			__set_bit(pos, p_spq->p_comp_bitmap);
-
-			while (test_bit(p_spq->comp_bitmap_idx,
-					p_spq->p_comp_bitmap)) {
-				__clear_bit(p_spq->comp_bitmap_idx,
-					    p_spq->p_comp_bitmap);
-				p_spq->comp_bitmap_idx++;
-				qed_chain_return_produced(&p_spq->chain);
-			}
-
+			qed_spq_comp_bmap_update(p_hwfn, echo);
 			p_spq->comp_count++;
 			found = p_ent;
 			break;
@@ -931,11 +936,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 			   QED_MSG_SPQ,
 			   "Got a completion without a callback function\n");
 
-	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
-	    (found->queue == &p_spq->unlimited_pending))
+	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 		/* EBLOCK is responsible for returning its own entry into the
-		 * free list, unless it originally added the entry into the
-		 * unlimited pending list.
+		 * free list.
 		 */
 		qed_spq_return_entry(p_hwfn, found);
 
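
The simplified condition falls out of the qed_spq_post() change above: because an EBLOCK caller is now handed the posted copy via post_ent, the kept comment ("EBLOCK is responsible for returning its own entry into the free list") covers the unlimited-pending case too, and the completion handler recycles only non-EBLOCK entries. A hypothetical one-liner capturing the resulting ownership rule (MODE_EBLOCK stands in for QED_SPQ_MODE_EBLOCK):

    #include <stdbool.h>

    enum comp_mode { MODE_EBLOCK, MODE_CB, MODE_BLOCK };

    /* True when the completion handler itself should return the entry to
     * the free pool; EBLOCK entries are recycled by their waiting caller.
     */
    static bool completion_returns_entry(enum comp_mode mode)
    {
            return mode != MODE_EBLOCK;
    }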