author     Sathya Perla <sathyap@serverengines.com>    2010-01-22 01:52:08 -0500
committer  David S. Miller <davem@davemloft.net>       2010-01-22 01:52:08 -0500
commit     26d92f9276a56d55511a427fb70bd70886af647a (patch)
tree       dd7ce9164a504badabb7bfbe56c296ad1529d6fc /drivers/net/benet
parent     b94b50289622e816adc9f94111cfc2679c80177c (diff)
be2net: fix bug in rx page posting
Pages are posted to the rxq in such a way that more than one frag
can share the page. The last frag that uses the page unmaps the
page. When a page is not fully used (due to lack of space in the rxq),
the last frag that actually uses the page is not marked as the
"last_page_user"; instead, the next frag slot in the rxq is incorrectly marked.
The fix has also been tested on ppc64 with 64k pages...
Signed-off-by: Sathya Perla <sathyap@serverengines.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
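To make the off-by-one easier to see outside the driver, the following is a minimal, self-contained C sketch of the posting loop. All names and sizes in it (frag_info, RING_LEN, PAGE_SZ, RX_FRAG_SZ, have_page) are hypothetical simplifications rather than be2net structures: each posted frag consumes RX_FRAG_SZ bytes of the current "big" page, the frag that exhausts a page is flagged last_page_user, and when the loop stops with a page only partially used, that flag must land on the previously posted entry, not on the untouched slot at the ring head (which is what the pre-patch code effectively did).

/* Standalone simulation of the rx frag posting logic (hypothetical,
 * simplified names; not the driver code). Build: cc -o post_sim post_sim.c
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_LEN    8        /* slots in the simulated rxq */
#define PAGE_SZ     8192     /* one "big" page shared by several frags */
#define RX_FRAG_SZ  2048     /* bytes of the page consumed per frag */

struct frag_info {
	int  page_id;            /* which page this frag points into */
	bool last_page_user;     /* only this frag may "unmap" the page */
};

int main(void)
{
	struct frag_info ring[RING_LEN] = { 0 };
	struct frag_info *info, *prev_info = NULL;
	int head = 0, page_id = 0, page_off = 0;
	bool have_page = false;      /* plays the role of pagep != NULL */
	int want = 5;                /* post fewer frags than fill whole pages */

	info = &ring[head];
	while (want--) {
		if (!have_page) {            /* start a fresh page */
			have_page = true;
			page_id++;
			page_off = 0;
		} else {                     /* next frag shares the page */
			page_off += RX_FRAG_SZ;
		}
		info->page_id = page_id;
		info->last_page_user = false;

		/* No room left for another frag? Close out this page here. */
		if (page_off + 2 * RX_FRAG_SZ > PAGE_SZ) {
			have_page = false;
			info->last_page_user = true;
		}

		prev_info = info;                /* remember the frag we filled */
		head = (head + 1) % RING_LEN;    /* only now advance the head */
		info = &ring[head];
	}

	/* Page only partially used: the previously posted frag must own the
	 * unmap. The buggy version flagged 'info' (the empty slot at head). */
	if (have_page)
		prev_info->last_page_user = true;

	for (int i = 0; i < RING_LEN; i++)
		printf("slot %d: page %d last_page_user=%d\n",
		       i, ring[i].page_id, ring[i].last_page_user);
	return 0;
}

With the parameters above, four frags share page 1 and the fourth is flagged when the page fills; the fifth frag starts page 2 and, because the loop ends with that page only partly used, prev_info (slot 4) gets the flag. The patch achieves the same ordering in the driver by remembering prev_page_info and moving queue_head_inc() after the bookkeeping, so the head only advances once the entry it indexes has been completely filled in.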
Diffstat (limited to 'drivers/net/benet')
-rw-r--r--   drivers/net/benet/be_main.c   8
1 file changed, 5 insertions, 3 deletions
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3a1f7902c16d..33ab8c7f14fe 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size)
 static void be_post_rx_frags(struct be_adapter *adapter)
 {
 	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
-	struct be_rx_page_info *page_info = NULL;
+	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
 	struct be_queue_info *rxq = &adapter->rx_obj.q;
 	struct page *pagep = NULL;
 	struct be_eth_rx_d *rxd;
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
 		rxd = queue_head_node(rxq);
 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
-		queue_head_inc(rxq);
 
 		/* Any space left in the current big page for another frag? */
 		if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
 			pagep = NULL;
 			page_info->last_page_user = true;
 		}
+
+		prev_page_info = page_info;
+		queue_head_inc(rxq);
 		page_info = &page_info_tbl[rxq->head];
 	}
 	if (pagep)
-		page_info->last_page_user = true;
+		prev_page_info->last_page_user = true;
 
 	if (posted) {
 		atomic_add(posted, &rxq->used);