author:    Fugang Duan <B38611@freescale.com>   2014-03-17 04:03:06 -0400
committer: Nitin Garg <nitin.garg@freescale.com>   2014-04-16 09:58:00 -0400
commit:    4cc4ba7e154ec99211a68c818dd82b3e3fb0d13b
tree:      50abd3129e8dd77c8016f3952d8742f58df67058
parent:    b63ce9a8b3bb1fa14090ea26b4b63f1260ea6f8d
net: fec: fix the error to get the previous BD entry
Bug: the previous BD entry is fetched incorrectly. When the current BD
is the first BD of the ring, the previous BD entry must be the last BD,
not "bdp - 1" as in the current logic (a stand-alone sketch of this
wrap rule follows the diffstat below).
V4:
* Optimize fec_enet_get_nextdesc() to clean up the code:
  Replace "ex_new_bd - ring_size" with "ex_base".
  Replace "new_bd - ring_size" with "base".
V3:
* Restore the API names, since David suggested using the fec_enet_
  prefix for all functions in the fec driver:
  change next_bd() -> fec_enet_get_nextdesc()
  change pre_bd()  -> fec_enet_get_prevdesc()
* Reduce the two APIs' parameter lists to make them easier to call.
V2:
* Add tx_ring_size and rx_ring_size to struct fec_enet_private.
* Replace the API fec_enet_get_nextdesc() with next_bd().
  Replace the API fec_enet_get_prevdesc() with pre_bd().
* Move all ring size checks into next_bd() and pre_bd(), which
  removes redundant code.
V1:
* Add a BD ring size check so that the previous BD entry is fetched
  correctly.
Reviewed-by: Li Frank <B20596@freescale.com>
Signed-off-by: Fugang Duan <B38611@freescale.com>
Acked-by: Frank Li <frank.li@freescale.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  102
1 file changed, 66 insertions(+), 36 deletions(-)
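For illustration only (not part of the patch): below is a stand-alone model of the wrap rule the fix enforces. The int ring and the helper names prev_desc()/next_desc() are hypothetical stand-ins for the driver's struct bufdesc ring and fec_enet_get_prevdesc()/fec_enet_get_nextdesc(), with the extended-descriptor and per-queue handling left out.

/* Minimal model of BD ring wrap-around (hypothetical stand-in code). */
#include <stddef.h>
#include <stdio.h>

#define RING_SIZE 4

static int ring[RING_SIZE];

/* The old driver code did an unconditional "bdp - 1", which steps out
 * of the ring when bdp is the first BD.  The fixed rule wraps to the
 * last BD instead.
 */
static int *prev_desc(int *bdp)
{
        ptrdiff_t idx = bdp - ring;

        return (idx == 0) ? &ring[RING_SIZE - 1] : &ring[idx - 1];
}

/* The matching rule for the next entry: past the last BD, wrap to base. */
static int *next_desc(int *bdp)
{
        ptrdiff_t idx = bdp - ring;

        return (idx == RING_SIZE - 1) ? &ring[0] : &ring[idx + 1];
}

int main(void)
{
        printf("prev of entry 0 -> entry %td\n", prev_desc(&ring[0]) - ring);
        printf("next of entry 3 -> entry %td\n", next_desc(&ring[3]) - ring);
        return 0;
}

Compiled with any C99 compiler, this prints "prev of entry 0 -> entry 3" and "next of entry 3 -> entry 0": stepping back from the first BD lands on the last BD instead of leaving the ring.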
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f49f43fef2aa..c0a531db1dbf 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -214,22 +214,63 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+                                      struct fec_enet_private *fep, int queue_id)
 {
-        struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-        if (is_ex)
-                return (struct bufdesc *)(ex + 1);
+        struct bufdesc *new_bd = bdp + 1;
+        struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+        struct fec_enet_priv_tx_q *tx_queue = fep->tx_queue[queue_id];
+        struct fec_enet_priv_rx_q *rx_queue = fep->rx_queue[queue_id];
+        struct bufdesc_ex *ex_base;
+        struct bufdesc *base;
+        int ring_size;
+
+        if (bdp >= tx_queue->tx_bd_base) {
+                base = tx_queue->tx_bd_base;
+                ring_size = tx_queue->tx_ring_size;
+                ex_base = (struct bufdesc_ex *)tx_queue->tx_bd_base;
+        } else {
+                base = rx_queue->rx_bd_base;
+                ring_size = rx_queue->rx_ring_size;
+                ex_base = (struct bufdesc_ex *)rx_queue->rx_bd_base;
+        }
+
+        if (fep->bufdesc_ex)
+                return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+                        ex_base : ex_new_bd);
         else
-                return bdp + 1;
+                return (new_bd >= (base + ring_size)) ?
+                        base : new_bd;
 }
 
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+                                      struct fec_enet_private *fep, int queue_id)
 {
-        struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-        if (is_ex)
-                return (struct bufdesc *)(ex - 1);
+        struct bufdesc *new_bd = bdp - 1;
+        struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+        struct fec_enet_priv_tx_q *tx_queue = fep->tx_queue[queue_id];
+        struct fec_enet_priv_rx_q *rx_queue = fep->rx_queue[queue_id];
+        struct bufdesc_ex *ex_base;
+        struct bufdesc *base;
+        int ring_size;
+
+        if (bdp >= tx_queue->tx_bd_base) {
+                base = tx_queue->tx_bd_base;
+                ring_size = tx_queue->tx_ring_size;
+                ex_base = (struct bufdesc_ex *)tx_queue->tx_bd_base;
+        } else {
+                base = rx_queue->rx_bd_base;
+                ring_size = rx_queue->rx_ring_size;
+                ex_base = (struct bufdesc_ex *)rx_queue->rx_bd_base;
+        }
+
+        if (fep->bufdesc_ex)
+                return (struct bufdesc *)((ex_new_bd < ex_base) ?
+                        (ex_new_bd + ring_size) : ex_new_bd);
         else
-                return bdp - 1;
+                return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }
 
 static void *swap_buffer(void *bufaddr, int len)
@@ -372,7 +413,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                         | BD_ENET_TX_LAST | BD_ENET_TX_TC);
         bdp->cbd_sc = status;
 
-        bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp_pre = fec_enet_get_prevdesc(bdp, fep, queue);
         if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
             !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
                 fep->delay_work.trig_tx = queue + 1;
@@ -381,10 +422,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         }
 
         /* If this was the last BD in the ring, start at the beginning again. */
-        if (status & BD_ENET_TX_WRAP)
-                bdp = txq->tx_bd_base;
-        else
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 
         skb_tx_timestamp(skb);
 
@@ -426,11 +464,11 @@ static void fec_enet_bd_init(struct net_device *dev)
                         bdp->cbd_sc = BD_ENET_RX_EMPTY;
                 else
                         bdp->cbd_sc = 0;
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep, i);
         }
 
         /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep, i);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         rx_queue->cur_rx = rx_queue->rx_bd_base;
@@ -451,11 +489,11 @@ static void fec_enet_bd_init(struct net_device *dev)
                         tx_queue->tx_skbuff[i] = NULL;
                 }
                 bdp->cbd_bufaddr = 0;
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep, i);
         }
 
         /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep, i);
         bdp->cbd_sc |= BD_SC_WRAP;
         tx_queue->dirty_tx = bdp;
 }
@@ -816,10 +854,7 @@ fec_enet_tx(struct net_device *ndev)
         bdp = txq->dirty_tx;
 
         /* get next bdp of dirty_tx */
-        if (bdp->cbd_sc & BD_ENET_TX_WRAP)
-                bdp = txq->tx_bd_base;
-        else
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 
         while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 
@@ -889,10 +924,7 @@ fec_enet_tx(struct net_device *ndev)
                 txq->dirty_tx = bdp;
 
                 /* Update pointer to next buffer descriptor to be transmitted */
-                if (status & BD_ENET_TX_WRAP)
-                        bdp = txq->tx_bd_base;
-                else
-                        bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 
                 /* Since we have freed up a buffer, the ring is no longer full
                  */
@@ -1098,10 +1130,8 @@ rx_processing_done:
                 }
 
                 /* Update BD pointer to next entry */
-                if (status & BD_ENET_RX_WRAP)
-                        bdp = rxq->rx_bd_base;
-                else
-                        bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+
                 /* Doing this here will keep the FEC running while we process
                  * incoming frames. On a heavily loaded network, we should be
                  * able to keep up at the expense of system resources.
@@ -1961,7 +1991,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                                  DMA_FROM_DEVICE);
                 if (skb)
                         dev_kfree_skb(skb);
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep, j);
         }
 }
 
@@ -2013,11 +2043,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
                         ebdp->cbd_esc = BD_ENET_RX_INT;
                 }
 
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep, j);
         }
 
         /* Set the last buffer to wrap. */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep, j);
         bdp->cbd_sc |= BD_SC_WRAP;
 }
 
@@ -2043,11 +2073,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
                         ebdp->cbd_esc = BD_ENET_TX_INT;
                 }
 
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep, j);
         }
 
         /* Set the last buffer to wrap. */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep, j);
         bdp->cbd_sc |= BD_SC_WRAP;
 }
 
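A second hedged illustration (hypothetical stand-alone code, not from the tree): the hunks above also convert every caller that open-coded the BD_ENET_*_WRAP check before advancing. With the helper owning the wrap logic, walking the ring becomes one uniform loop, roughly like this:

/* Hypothetical model of the caller pattern after the patch. */
#include <stddef.h>
#include <stdio.h>

#define RING_SIZE 4

static int ring[RING_SIZE];

/* stand-in for fec_enet_get_nextdesc(): the helper owns the wrap */
static int *next_desc(int *bdp)
{
        ptrdiff_t idx = bdp - ring;

        return (idx == RING_SIZE - 1) ? &ring[0] : &ring[idx + 1];
}

int main(void)
{
        int *bdp = &ring[2];        /* start anywhere, e.g. at dirty_tx */
        int i;

        /* Before the patch a caller advanced with:
         *        if (status & BD_ENET_TX_WRAP)
         *                bdp = txq->tx_bd_base;
         *        else
         *                bdp = fec_enet_get_nextdesc(...);
         * Now a single call suffices, and the loop still visits every entry.
         */
        for (i = 0; i < RING_SIZE; i++) {
                printf("visiting entry %td\n", bdp - ring);
                bdp = next_desc(bdp);
        }
        return 0;
}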