diff options
author | Frank Li <Frank.Li@freescale.com> | 2013-01-03 11:04:23 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-01-04 18:15:39 -0500 |
commit | ff43da86c69d76a726ffe7d1666148960dc1d108 (patch) | |
tree | cb043ff99ad1a7df3b51e79907734173ea93d817 /drivers/net/ethernet/freescale/fec.c | |
parent | 579e1d816260d9f66cb63e4c2911794c9970c293 (diff) |
NET: FEC: dynamically check DMA desc buff type
MX6 and MX28 support an enhanced DMA descriptor buffer for 1588
PTP, but MX25, MX3x, and MX5x cannot support the enhanced DMA descriptor buffer.
Check the FEC type and choose the correct DMA descriptor buffer type.
Remove the static config CONFIG_FEC_PTP;
the PTP function will be auto-detected.
Signed-off-by: Frank Li <Frank.Li@freescale.com>
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/freescale/fec.c')
-rw-r--r-- | drivers/net/ethernet/freescale/fec.c | 175 |
1 file changed, 108 insertions, 67 deletions
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 0704bcab178a..a3793190437f 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c | |||
@@ -76,6 +76,8 @@ | |||
76 | #define FEC_QUIRK_USE_GASKET (1 << 2) | 76 | #define FEC_QUIRK_USE_GASKET (1 << 2) |
77 | /* Controller has GBIT support */ | 77 | /* Controller has GBIT support */ |
78 | #define FEC_QUIRK_HAS_GBIT (1 << 3) | 78 | #define FEC_QUIRK_HAS_GBIT (1 << 3) |
79 | /* Controller has extend desc buffer */ | ||
80 | #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) | ||
79 | 81 | ||
80 | static struct platform_device_id fec_devtype[] = { | 82 | static struct platform_device_id fec_devtype[] = { |
81 | { | 83 | { |
@@ -93,7 +95,8 @@ static struct platform_device_id fec_devtype[] = { | |||
93 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | 95 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, |
94 | }, { | 96 | }, { |
95 | .name = "imx6q-fec", | 97 | .name = "imx6q-fec", |
96 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT, | 98 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | |
99 | FEC_QUIRK_HAS_BUFDESC_EX, | ||
97 | }, { | 100 | }, { |
98 | /* sentinel */ | 101 | /* sentinel */ |
99 | } | 102 | } |
@@ -140,7 +143,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); | |||
140 | #endif | 143 | #endif |
141 | #endif /* CONFIG_M5272 */ | 144 | #endif /* CONFIG_M5272 */ |
142 | 145 | ||
143 | #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) | 146 | #if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE) |
144 | #error "FEC: descriptor ring size constants too large" | 147 | #error "FEC: descriptor ring size constants too large" |
145 | #endif | 148 | #endif |
146 | 149 | ||
@@ -192,6 +195,24 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); | |||
192 | 195 | ||
193 | static int mii_cnt; | 196 | static int mii_cnt; |
194 | 197 | ||
198 | static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex) | ||
199 | { | ||
200 | struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; | ||
201 | if (is_ex) | ||
202 | return (struct bufdesc *)(ex + 1); | ||
203 | else | ||
204 | return bdp + 1; | ||
205 | } | ||
206 | |||
207 | static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex) | ||
208 | { | ||
209 | struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; | ||
210 | if (is_ex) | ||
211 | return (struct bufdesc *)(ex - 1); | ||
212 | else | ||
213 | return bdp - 1; | ||
214 | } | ||
215 | |||
195 | static void *swap_buffer(void *bufaddr, int len) | 216 | static void *swap_buffer(void *bufaddr, int len) |
196 | { | 217 | { |
197 | int i; | 218 | int i; |
@@ -248,7 +269,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
248 | */ | 269 | */ |
249 | if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { | 270 | if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { |
250 | unsigned int index; | 271 | unsigned int index; |
251 | index = bdp - fep->tx_bd_base; | 272 | if (fep->bufdesc_ex) |
273 | index = (struct bufdesc_ex *)bdp - | ||
274 | (struct bufdesc_ex *)fep->tx_bd_base; | ||
275 | else | ||
276 | index = bdp - fep->tx_bd_base; | ||
252 | memcpy(fep->tx_bounce[index], skb->data, skb->len); | 277 | memcpy(fep->tx_bounce[index], skb->data, skb->len); |
253 | bufaddr = fep->tx_bounce[index]; | 278 | bufaddr = fep->tx_bounce[index]; |
254 | } | 279 | } |
@@ -280,17 +305,19 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
280 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); | 305 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); |
281 | bdp->cbd_sc = status; | 306 | bdp->cbd_sc = status; |
282 | 307 | ||
283 | #ifdef CONFIG_FEC_PTP | 308 | if (fep->bufdesc_ex) { |
284 | bdp->cbd_bdu = 0; | 309 | |
285 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && | 310 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
311 | ebdp->cbd_bdu = 0; | ||
312 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && | ||
286 | fep->hwts_tx_en)) { | 313 | fep->hwts_tx_en)) { |
287 | bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT); | 314 | ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT); |
288 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 315 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
289 | } else { | 316 | } else { |
290 | 317 | ||
291 | bdp->cbd_esc = BD_ENET_TX_INT; | 318 | ebdp->cbd_esc = BD_ENET_TX_INT; |
319 | } | ||
292 | } | 320 | } |
293 | #endif | ||
294 | /* Trigger transmission start */ | 321 | /* Trigger transmission start */ |
295 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); | 322 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); |
296 | 323 | ||
@@ -298,7 +325,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
298 | if (status & BD_ENET_TX_WRAP) | 325 | if (status & BD_ENET_TX_WRAP) |
299 | bdp = fep->tx_bd_base; | 326 | bdp = fep->tx_bd_base; |
300 | else | 327 | else |
301 | bdp++; | 328 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); |
302 | 329 | ||
303 | if (bdp == fep->dirty_tx) { | 330 | if (bdp == fep->dirty_tx) { |
304 | fep->tx_full = 1; | 331 | fep->tx_full = 1; |
@@ -359,8 +386,12 @@ fec_restart(struct net_device *ndev, int duplex) | |||
359 | 386 | ||
360 | /* Set receive and transmit descriptor base. */ | 387 | /* Set receive and transmit descriptor base. */ |
361 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); | 388 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); |
362 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, | 389 | if (fep->bufdesc_ex) |
363 | fep->hwp + FEC_X_DES_START); | 390 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) |
391 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); | ||
392 | else | ||
393 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) | ||
394 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); | ||
364 | 395 | ||
365 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | 396 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; |
366 | fep->cur_rx = fep->rx_bd_base; | 397 | fep->cur_rx = fep->rx_bd_base; |
@@ -448,17 +479,16 @@ fec_restart(struct net_device *ndev, int duplex) | |||
448 | writel(1 << 8, fep->hwp + FEC_X_WMRK); | 479 | writel(1 << 8, fep->hwp + FEC_X_WMRK); |
449 | } | 480 | } |
450 | 481 | ||
451 | #ifdef CONFIG_FEC_PTP | 482 | if (fep->bufdesc_ex) |
452 | ecntl |= (1 << 4); | 483 | ecntl |= (1 << 4); |
453 | #endif | ||
454 | 484 | ||
455 | /* And last, enable the transmit and receive processing */ | 485 | /* And last, enable the transmit and receive processing */ |
456 | writel(ecntl, fep->hwp + FEC_ECNTRL); | 486 | writel(ecntl, fep->hwp + FEC_ECNTRL); |
457 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); | 487 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); |
458 | 488 | ||
459 | #ifdef CONFIG_FEC_PTP | 489 | if (fep->bufdesc_ex) |
460 | fec_ptp_start_cyclecounter(ndev); | 490 | fec_ptp_start_cyclecounter(ndev); |
461 | #endif | 491 | |
462 | /* Enable interrupts we wish to service */ | 492 | /* Enable interrupts we wish to service */ |
463 | writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); | 493 | writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); |
464 | } | 494 | } |
@@ -544,19 +574,20 @@ fec_enet_tx(struct net_device *ndev) | |||
544 | ndev->stats.tx_packets++; | 574 | ndev->stats.tx_packets++; |
545 | } | 575 | } |
546 | 576 | ||
547 | #ifdef CONFIG_FEC_PTP | 577 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && |
548 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { | 578 | fep->bufdesc_ex) { |
549 | struct skb_shared_hwtstamps shhwtstamps; | 579 | struct skb_shared_hwtstamps shhwtstamps; |
550 | unsigned long flags; | 580 | unsigned long flags; |
581 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; | ||
551 | 582 | ||
552 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | 583 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
553 | spin_lock_irqsave(&fep->tmreg_lock, flags); | 584 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
554 | shhwtstamps.hwtstamp = ns_to_ktime( | 585 | shhwtstamps.hwtstamp = ns_to_ktime( |
555 | timecounter_cyc2time(&fep->tc, bdp->ts)); | 586 | timecounter_cyc2time(&fep->tc, ebdp->ts)); |
556 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | 587 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); |
557 | skb_tstamp_tx(skb, &shhwtstamps); | 588 | skb_tstamp_tx(skb, &shhwtstamps); |
558 | } | 589 | } |
559 | #endif | 590 | |
560 | if (status & BD_ENET_TX_READY) | 591 | if (status & BD_ENET_TX_READY) |
561 | printk("HEY! Enet xmit interrupt and TX_READY.\n"); | 592 | printk("HEY! Enet xmit interrupt and TX_READY.\n"); |
562 | 593 | ||
@@ -575,7 +606,7 @@ fec_enet_tx(struct net_device *ndev) | |||
575 | if (status & BD_ENET_TX_WRAP) | 606 | if (status & BD_ENET_TX_WRAP) |
576 | bdp = fep->tx_bd_base; | 607 | bdp = fep->tx_bd_base; |
577 | else | 608 | else |
578 | bdp++; | 609 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); |
579 | 610 | ||
580 | /* Since we have freed up a buffer, the ring is no longer full | 611 | /* Since we have freed up a buffer, the ring is no longer full |
581 | */ | 612 | */ |
@@ -683,21 +714,23 @@ fec_enet_rx(struct net_device *ndev) | |||
683 | skb_put(skb, pkt_len - 4); /* Make room */ | 714 | skb_put(skb, pkt_len - 4); /* Make room */ |
684 | skb_copy_to_linear_data(skb, data, pkt_len - 4); | 715 | skb_copy_to_linear_data(skb, data, pkt_len - 4); |
685 | skb->protocol = eth_type_trans(skb, ndev); | 716 | skb->protocol = eth_type_trans(skb, ndev); |
686 | #ifdef CONFIG_FEC_PTP | 717 | |
687 | /* Get receive timestamp from the skb */ | 718 | /* Get receive timestamp from the skb */ |
688 | if (fep->hwts_rx_en) { | 719 | if (fep->hwts_rx_en && fep->bufdesc_ex) { |
689 | struct skb_shared_hwtstamps *shhwtstamps = | 720 | struct skb_shared_hwtstamps *shhwtstamps = |
690 | skb_hwtstamps(skb); | 721 | skb_hwtstamps(skb); |
691 | unsigned long flags; | 722 | unsigned long flags; |
723 | struct bufdesc_ex *ebdp = | ||
724 | (struct bufdesc_ex *)bdp; | ||
692 | 725 | ||
693 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | 726 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
694 | 727 | ||
695 | spin_lock_irqsave(&fep->tmreg_lock, flags); | 728 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
696 | shhwtstamps->hwtstamp = ns_to_ktime( | 729 | shhwtstamps->hwtstamp = ns_to_ktime( |
697 | timecounter_cyc2time(&fep->tc, bdp->ts)); | 730 | timecounter_cyc2time(&fep->tc, ebdp->ts)); |
698 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | 731 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); |
699 | } | 732 | } |
700 | #endif | 733 | |
701 | if (!skb_defer_rx_timestamp(skb)) | 734 | if (!skb_defer_rx_timestamp(skb)) |
702 | netif_rx(skb); | 735 | netif_rx(skb); |
703 | } | 736 | } |
@@ -712,17 +745,19 @@ rx_processing_done: | |||
712 | status |= BD_ENET_RX_EMPTY; | 745 | status |= BD_ENET_RX_EMPTY; |
713 | bdp->cbd_sc = status; | 746 | bdp->cbd_sc = status; |
714 | 747 | ||
715 | #ifdef CONFIG_FEC_PTP | 748 | if (fep->bufdesc_ex) { |
716 | bdp->cbd_esc = BD_ENET_RX_INT; | 749 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
717 | bdp->cbd_prot = 0; | 750 | |
718 | bdp->cbd_bdu = 0; | 751 | ebdp->cbd_esc = BD_ENET_RX_INT; |
719 | #endif | 752 | ebdp->cbd_prot = 0; |
753 | ebdp->cbd_bdu = 0; | ||
754 | } | ||
720 | 755 | ||
721 | /* Update BD pointer to next entry */ | 756 | /* Update BD pointer to next entry */ |
722 | if (status & BD_ENET_RX_WRAP) | 757 | if (status & BD_ENET_RX_WRAP) |
723 | bdp = fep->rx_bd_base; | 758 | bdp = fep->rx_bd_base; |
724 | else | 759 | else |
725 | bdp++; | 760 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); |
726 | /* Doing this here will keep the FEC running while we process | 761 | /* Doing this here will keep the FEC running while we process |
727 | * incoming frames. On a heavily loaded network, we should be | 762 | * incoming frames. On a heavily loaded network, we should be |
728 | * able to keep up at the expense of system resources. | 763 | * able to keep up at the expense of system resources. |
@@ -1157,10 +1192,9 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) | |||
1157 | if (!phydev) | 1192 | if (!phydev) |
1158 | return -ENODEV; | 1193 | return -ENODEV; |
1159 | 1194 | ||
1160 | #ifdef CONFIG_FEC_PTP | 1195 | if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex) |
1161 | if (cmd == SIOCSHWTSTAMP) | ||
1162 | return fec_ptp_ioctl(ndev, rq, cmd); | 1196 | return fec_ptp_ioctl(ndev, rq, cmd); |
1163 | #endif | 1197 | |
1164 | return phy_mii_ioctl(phydev, rq, cmd); | 1198 | return phy_mii_ioctl(phydev, rq, cmd); |
1165 | } | 1199 | } |
1166 | 1200 | ||
@@ -1180,7 +1214,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) | |||
1180 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 1214 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); |
1181 | if (skb) | 1215 | if (skb) |
1182 | dev_kfree_skb(skb); | 1216 | dev_kfree_skb(skb); |
1183 | bdp++; | 1217 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); |
1184 | } | 1218 | } |
1185 | 1219 | ||
1186 | bdp = fep->tx_bd_base; | 1220 | bdp = fep->tx_bd_base; |
@@ -1207,14 +1241,17 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) | |||
1207 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, | 1241 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, |
1208 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 1242 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); |
1209 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | 1243 | bdp->cbd_sc = BD_ENET_RX_EMPTY; |
1210 | #ifdef CONFIG_FEC_PTP | 1244 | |
1211 | bdp->cbd_esc = BD_ENET_RX_INT; | 1245 | if (fep->bufdesc_ex) { |
1212 | #endif | 1246 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
1213 | bdp++; | 1247 | ebdp->cbd_esc = BD_ENET_RX_INT; |
1248 | } | ||
1249 | |||
1250 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1214 | } | 1251 | } |
1215 | 1252 | ||
1216 | /* Set the last buffer to wrap. */ | 1253 | /* Set the last buffer to wrap. */ |
1217 | bdp--; | 1254 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); |
1218 | bdp->cbd_sc |= BD_SC_WRAP; | 1255 | bdp->cbd_sc |= BD_SC_WRAP; |
1219 | 1256 | ||
1220 | bdp = fep->tx_bd_base; | 1257 | bdp = fep->tx_bd_base; |
@@ -1224,14 +1261,16 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) | |||
1224 | bdp->cbd_sc = 0; | 1261 | bdp->cbd_sc = 0; |
1225 | bdp->cbd_bufaddr = 0; | 1262 | bdp->cbd_bufaddr = 0; |
1226 | 1263 | ||
1227 | #ifdef CONFIG_FEC_PTP | 1264 | if (fep->bufdesc_ex) { |
1228 | bdp->cbd_esc = BD_ENET_RX_INT; | 1265 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
1229 | #endif | 1266 | ebdp->cbd_esc = BD_ENET_RX_INT; |
1230 | bdp++; | 1267 | } |
1268 | |||
1269 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1231 | } | 1270 | } |
1232 | 1271 | ||
1233 | /* Set the last buffer to wrap. */ | 1272 | /* Set the last buffer to wrap. */ |
1234 | bdp--; | 1273 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); |
1235 | bdp->cbd_sc |= BD_SC_WRAP; | 1274 | bdp->cbd_sc |= BD_SC_WRAP; |
1236 | 1275 | ||
1237 | return 0; | 1276 | return 0; |
@@ -1444,7 +1483,11 @@ static int fec_enet_init(struct net_device *ndev) | |||
1444 | 1483 | ||
1445 | /* Set receive and transmit descriptor base. */ | 1484 | /* Set receive and transmit descriptor base. */ |
1446 | fep->rx_bd_base = cbd_base; | 1485 | fep->rx_bd_base = cbd_base; |
1447 | fep->tx_bd_base = cbd_base + RX_RING_SIZE; | 1486 | if (fep->bufdesc_ex) |
1487 | fep->tx_bd_base = (struct bufdesc *) | ||
1488 | (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE); | ||
1489 | else | ||
1490 | fep->tx_bd_base = cbd_base + RX_RING_SIZE; | ||
1448 | 1491 | ||
1449 | /* The FEC Ethernet specific entries in the device structure */ | 1492 | /* The FEC Ethernet specific entries in the device structure */ |
1450 | ndev->watchdog_timeo = TX_TIMEOUT; | 1493 | ndev->watchdog_timeo = TX_TIMEOUT; |
@@ -1457,11 +1500,11 @@ static int fec_enet_init(struct net_device *ndev) | |||
1457 | 1500 | ||
1458 | /* Initialize the BD for every fragment in the page. */ | 1501 | /* Initialize the BD for every fragment in the page. */ |
1459 | bdp->cbd_sc = 0; | 1502 | bdp->cbd_sc = 0; |
1460 | bdp++; | 1503 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); |
1461 | } | 1504 | } |
1462 | 1505 | ||
1463 | /* Set the last buffer to wrap */ | 1506 | /* Set the last buffer to wrap */ |
1464 | bdp--; | 1507 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); |
1465 | bdp->cbd_sc |= BD_SC_WRAP; | 1508 | bdp->cbd_sc |= BD_SC_WRAP; |
1466 | 1509 | ||
1467 | /* ...and the same for transmit */ | 1510 | /* ...and the same for transmit */ |
@@ -1471,11 +1514,11 @@ static int fec_enet_init(struct net_device *ndev) | |||
1471 | /* Initialize the BD for every fragment in the page. */ | 1514 | /* Initialize the BD for every fragment in the page. */ |
1472 | bdp->cbd_sc = 0; | 1515 | bdp->cbd_sc = 0; |
1473 | bdp->cbd_bufaddr = 0; | 1516 | bdp->cbd_bufaddr = 0; |
1474 | bdp++; | 1517 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); |
1475 | } | 1518 | } |
1476 | 1519 | ||
1477 | /* Set the last buffer to wrap */ | 1520 | /* Set the last buffer to wrap */ |
1478 | bdp--; | 1521 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); |
1479 | bdp->cbd_sc |= BD_SC_WRAP; | 1522 | bdp->cbd_sc |= BD_SC_WRAP; |
1480 | 1523 | ||
1481 | fec_restart(ndev, 0); | 1524 | fec_restart(ndev, 0); |
@@ -1574,6 +1617,8 @@ fec_probe(struct platform_device *pdev) | |||
1574 | fep->pdev = pdev; | 1617 | fep->pdev = pdev; |
1575 | fep->dev_id = dev_id++; | 1618 | fep->dev_id = dev_id++; |
1576 | 1619 | ||
1620 | fep->bufdesc_ex = 0; | ||
1621 | |||
1577 | if (!fep->hwp) { | 1622 | if (!fep->hwp) { |
1578 | ret = -ENOMEM; | 1623 | ret = -ENOMEM; |
1579 | goto failed_ioremap; | 1624 | goto failed_ioremap; |
@@ -1628,19 +1673,19 @@ fec_probe(struct platform_device *pdev) | |||
1628 | goto failed_clk; | 1673 | goto failed_clk; |
1629 | } | 1674 | } |
1630 | 1675 | ||
1631 | #ifdef CONFIG_FEC_PTP | ||
1632 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); | 1676 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); |
1677 | fep->bufdesc_ex = | ||
1678 | pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; | ||
1633 | if (IS_ERR(fep->clk_ptp)) { | 1679 | if (IS_ERR(fep->clk_ptp)) { |
1634 | ret = PTR_ERR(fep->clk_ptp); | 1680 | ret = PTR_ERR(fep->clk_ptp); |
1635 | goto failed_clk; | 1681 | fep->bufdesc_ex = 0; |
1636 | } | 1682 | } |
1637 | #endif | ||
1638 | 1683 | ||
1639 | clk_prepare_enable(fep->clk_ahb); | 1684 | clk_prepare_enable(fep->clk_ahb); |
1640 | clk_prepare_enable(fep->clk_ipg); | 1685 | clk_prepare_enable(fep->clk_ipg); |
1641 | #ifdef CONFIG_FEC_PTP | 1686 | if (!IS_ERR(fep->clk_ptp)) |
1642 | clk_prepare_enable(fep->clk_ptp); | 1687 | clk_prepare_enable(fep->clk_ptp); |
1643 | #endif | 1688 | |
1644 | reg_phy = devm_regulator_get(&pdev->dev, "phy"); | 1689 | reg_phy = devm_regulator_get(&pdev->dev, "phy"); |
1645 | if (!IS_ERR(reg_phy)) { | 1690 | if (!IS_ERR(reg_phy)) { |
1646 | ret = regulator_enable(reg_phy); | 1691 | ret = regulator_enable(reg_phy); |
@@ -1668,9 +1713,8 @@ fec_probe(struct platform_device *pdev) | |||
1668 | if (ret) | 1713 | if (ret) |
1669 | goto failed_register; | 1714 | goto failed_register; |
1670 | 1715 | ||
1671 | #ifdef CONFIG_FEC_PTP | 1716 | if (fep->bufdesc_ex) |
1672 | fec_ptp_init(ndev, pdev); | 1717 | fec_ptp_init(ndev, pdev); |
1673 | #endif | ||
1674 | 1718 | ||
1675 | return 0; | 1719 | return 0; |
1676 | 1720 | ||
@@ -1681,9 +1725,8 @@ failed_init: | |||
1681 | failed_regulator: | 1725 | failed_regulator: |
1682 | clk_disable_unprepare(fep->clk_ahb); | 1726 | clk_disable_unprepare(fep->clk_ahb); |
1683 | clk_disable_unprepare(fep->clk_ipg); | 1727 | clk_disable_unprepare(fep->clk_ipg); |
1684 | #ifdef CONFIG_FEC_PTP | 1728 | if (!IS_ERR(fep->clk_ptp)) |
1685 | clk_disable_unprepare(fep->clk_ptp); | 1729 | clk_disable_unprepare(fep->clk_ptp); |
1686 | #endif | ||
1687 | failed_pin: | 1730 | failed_pin: |
1688 | failed_clk: | 1731 | failed_clk: |
1689 | for (i = 0; i < FEC_IRQ_NUM; i++) { | 1732 | for (i = 0; i < FEC_IRQ_NUM; i++) { |
@@ -1716,12 +1759,10 @@ fec_drv_remove(struct platform_device *pdev) | |||
1716 | if (irq > 0) | 1759 | if (irq > 0) |
1717 | free_irq(irq, ndev); | 1760 | free_irq(irq, ndev); |
1718 | } | 1761 | } |
1719 | #ifdef CONFIG_FEC_PTP | ||
1720 | del_timer_sync(&fep->time_keep); | 1762 | del_timer_sync(&fep->time_keep); |
1721 | clk_disable_unprepare(fep->clk_ptp); | 1763 | clk_disable_unprepare(fep->clk_ptp); |
1722 | if (fep->ptp_clock) | 1764 | if (fep->ptp_clock) |
1723 | ptp_clock_unregister(fep->ptp_clock); | 1765 | ptp_clock_unregister(fep->ptp_clock); |
1724 | #endif | ||
1725 | clk_disable_unprepare(fep->clk_ahb); | 1766 | clk_disable_unprepare(fep->clk_ahb); |
1726 | clk_disable_unprepare(fep->clk_ipg); | 1767 | clk_disable_unprepare(fep->clk_ipg); |
1727 | iounmap(fep->hwp); | 1768 | iounmap(fep->hwp); |