author     David S. Miller <davem@davemloft.net>  2014-09-13 17:32:29 -0400
committer  David S. Miller <davem@davemloft.net>  2014-09-13 17:32:29 -0400
commit     45f85a2565a9ba4ab4dc095d4d32c4cd2a56b168 (patch)
tree       89954e5f2582ff8608aad840d222983e04d70f5a
parent     42f272539487e49c9ea830ad97db41eb9937d5dc (diff)
parent     0afdfe951989aec4528a88213b1e1b1b595feae0 (diff)
Merge branch 'fec-next'
Frank Li says:
====================
net: fec: imx6sx multiqueue support
These patches enable i.MX6SX multi queue support.
The i.MX6SX supports 3 queues and the AVB feature.
Change from v3 to v4
- use "unsigned int" instead of "unsigned"
Change from v2 to v3
- fixed the alignment requirement for ARM and non-ARM platforms
Change from v1 to v2
- Change num_tx_queue to unsigned int
- Avoid blocking non-DT platforms
- Remove the call to netif_set_real_num_rx_queues
- Separate the multiqueue patch into two parts: one covers the tx and rx
  handling with a fixed queue 0, the other initializes multiqueue support
- Use two different alignments for the tx and rx paths
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/devicetree/bindings/net/fsl-fec.txt |   6
-rw-r--r--  arch/arm/boot/dts/imx6sx.dtsi                     |   2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h              | 154
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c         | 853
4 files changed, 756 insertions, 259 deletions
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 8a2c7b55ec16..0c8775c45798 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -16,6 +16,12 @@ Optional properties:
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
   Use instead of phy-handle.
+- fsl,num-tx-queues : The property is valid for enet-avb IP, which supports
+  hw multi queues. Should specify the tx queue number, otherwise set tx queue
+  number to 1.
+- fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
+  hw multi queues. Should specify the rx queue number, otherwise set rx queue
+  number to 1.
 
 Optional subnodes:
 - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index f4b9da65bc0f..0a03260c1707 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -776,6 +776,8 @@
 				 <&clks IMX6SX_CLK_ENET_PTP>;
 			clock-names = "ipg", "ahb", "ptp",
 				      "enet_clk_ref", "enet_out";
+			fsl,num-tx-queues=<3>;
+			fsl,num-rx-queues=<3>;
 			status = "disabled";
 		};
 
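Before the net_device is created, the probe path must turn these two properties into queue counts. A minimal sketch of that parse, with the helper name and clamping behavior assumed rather than taken from the quoted hunks (FEC_ENET_MAX_TX_QS/FEC_ENET_MAX_RX_QS come from the fec.h changes below):

	/* Sketch only: fall back to one queue when a property is absent
	 * and clamp to the 3 rings the enet-avb IP provides.
	 */
	static void fec_get_queue_num(struct platform_device *pdev,
				      unsigned int *num_tx,
				      unsigned int *num_rx)
	{
		struct device_node *np = pdev->dev.of_node;
		u32 val;

		*num_tx = *num_rx = 1;
		if (!np)
			return;
		if (!of_property_read_u32(np, "fsl,num-tx-queues", &val))
			*num_tx = min_t(u32, val, FEC_ENET_MAX_TX_QS);
		if (!of_property_read_u32(np, "fsl,num-rx-queues", &val))
			*num_rx = min_t(u32, val, FEC_ENET_MAX_RX_QS);
	}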
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ee41d98b44b6..b7c77229f1e9 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -27,8 +27,8 @@
  */
 #define FEC_IEVENT		0x004 /* Interrupt event reg */
 #define FEC_IMASK		0x008 /* Interrupt mask reg */
-#define FEC_R_DES_ACTIVE	0x010 /* Receive descriptor reg */
-#define FEC_X_DES_ACTIVE	0x014 /* Transmit descriptor reg */
+#define FEC_R_DES_ACTIVE_0	0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE_0	0x014 /* Transmit descriptor reg */
 #define FEC_ECNTRL		0x024 /* Ethernet control reg */
 #define FEC_MII_DATA		0x040 /* MII manage frame reg */
 #define FEC_MII_SPEED		0x044 /* MII speed control reg */
@@ -38,6 +38,12 @@
 #define FEC_ADDR_LOW		0x0e4 /* Low 32bits MAC address */
 #define FEC_ADDR_HIGH		0x0e8 /* High 16bits MAC address */
 #define FEC_OPD			0x0ec /* Opcode + Pause duration */
+#define FEC_TXIC0		0xF0  /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1		0xF4  /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2		0xF8  /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_RXIC0		0x100 /* Rx Interrupt Coalescing for ring 0 */
+#define FEC_RXIC1		0x104 /* Rx Interrupt Coalescing for ring 1 */
+#define FEC_RXIC2		0x108 /* Rx Interrupt Coalescing for ring 2 */
 #define FEC_HASH_TABLE_HIGH	0x118 /* High 32bits hash table */
 #define FEC_HASH_TABLE_LOW	0x11c /* Low 32bits hash table */
 #define FEC_GRP_HASH_TABLE_HIGH	0x120 /* High 32bits hash table */
@@ -45,14 +51,27 @@
 #define FEC_X_WMRK		0x144 /* FIFO transmit water mark */
 #define FEC_R_BOUND		0x14c /* FIFO receive bound reg */
 #define FEC_R_FSTART		0x150 /* FIFO receive start reg */
-#define FEC_R_DES_START		0x180 /* Receive descriptor ring */
-#define FEC_X_DES_START		0x184 /* Transmit descriptor ring */
+#define FEC_R_DES_START_1	0x160 /* Receive descriptor ring 1 */
+#define FEC_X_DES_START_1	0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_DES_START_2	0x16c /* Receive descriptor ring 2 */
+#define FEC_X_DES_START_2	0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_DES_START_0	0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START_0	0x184 /* Transmit descriptor ring */
 #define FEC_R_BUFF_SIZE		0x188 /* Maximum receive buff size */
 #define FEC_R_FIFO_RSFL		0x190 /* Receive FIFO section full threshold */
 #define FEC_R_FIFO_RSEM		0x194 /* Receive FIFO section empty threshold */
 #define FEC_R_FIFO_RAEM		0x198 /* Receive FIFO almost empty threshold */
 #define FEC_R_FIFO_RAFL		0x19c /* Receive FIFO almost full threshold */
 #define FEC_RACC		0x1C4 /* Receive Accelerator function */
+#define FEC_RCMR_1		0x1c8 /* Receive classification match ring 1 */
+#define FEC_RCMR_2		0x1cc /* Receive classification match ring 2 */
+#define FEC_DMA_CFG_1		0x1d8 /* DMA class configuration for ring 1 */
+#define FEC_DMA_CFG_2		0x1dc /* DMA class Configuration for ring 2 */
+#define FEC_R_DES_ACTIVE_1	0x1e0 /* Rx descriptor active for ring 1 */
+#define FEC_X_DES_ACTIVE_1	0x1e4 /* Tx descriptor active for ring 1 */
+#define FEC_R_DES_ACTIVE_2	0x1e8 /* Rx descriptor active for ring 2 */
+#define FEC_X_DES_ACTIVE_2	0x1ec /* Tx descriptor active for ring 2 */
+#define FEC_QOS_SCHEME		0x1f0 /* Set multi queues Qos scheme */
 #define FEC_MIIGSK_CFGR		0x300 /* MIIGSK Configuration reg */
 #define FEC_MIIGSK_ENR		0x308 /* MIIGSK Enable reg */
 
@@ -233,6 +252,43 @@ struct bufdesc_ex {
 /* This device has up to three irqs on some platforms */
 #define FEC_IRQ_NUM	3
 
+/* Maximum number of queues supported
+ * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
+ * User can point the queue number that is less than or equal to 3.
+ */
+#define FEC_ENET_MAX_TX_QS	3
+#define FEC_ENET_MAX_RX_QS	3
+
+#define FEC_R_DES_START(X)	((X == 1) ? FEC_R_DES_START_1 : \
+				((X == 2) ? \
+					FEC_R_DES_START_2 : FEC_R_DES_START_0))
+#define FEC_X_DES_START(X)	((X == 1) ? FEC_X_DES_START_1 : \
+				((X == 2) ? \
+					FEC_X_DES_START_2 : FEC_X_DES_START_0))
+#define FEC_R_DES_ACTIVE(X)	((X == 1) ? FEC_R_DES_ACTIVE_1 : \
+				((X == 2) ? \
+					FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
+#define FEC_X_DES_ACTIVE(X)	((X == 1) ? FEC_X_DES_ACTIVE_1 : \
+				((X == 2) ? \
+					FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
+
+#define FEC_DMA_CFG(X)		((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+
+#define DMA_CLASS_EN		(1 << 16)
+#define FEC_RCMR(X)		((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK		0xFFFF
+#define IDLE_SLOPE_1		0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE_2		0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE(X)		((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+				(IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
+#define RCMR_MATCHEN		(0x1 << 16)
+#define RCMR_CMP_CFG(v, n)	((v & 0x7) << (n << 2))
+#define RCMR_CMP_1		(RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
+				RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
+#define RCMR_CMP_2		(RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
+				RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
+#define RCMR_CMP(X)		((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+
 /* The number of Tx and Rx buffers. These are allocated from the page
  * pool. The code may assume these are power of two, so it it best
  * to keep them that size.
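The selector macros collapse per-ring register access into a single call site: FEC_X_DES_ACTIVE(q) resolves to 0x1e4 for ring 1, 0x1ec for ring 2, and the legacy 0x014 for ring 0. A minimal usage sketch with a hypothetical helper name (the driver itself open-codes the writel at each call site, as the fec_main.c hunks below show):

	/* Sketch: ring a tx queue's doorbell through the selector macro. */
	static inline void fec_kick_tx(struct fec_enet_private *fep,
				       unsigned short queue)
	{
		if (queue < FEC_ENET_MAX_TX_QS)
			writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
	}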
@@ -256,6 +312,61 @@ struct bufdesc_ex {
 #define FLAG_RX_CSUM_ENABLED	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 #define FLAG_RX_CSUM_ERROR	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
+#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
+#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
+#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
+#define FEC_ENET_TXF_0	((uint)0x08000000)	/* Full frame transmitted */
+#define FEC_ENET_TXF_1	((uint)0x00000008)	/* Full frame transmitted */
+#define FEC_ENET_TXF_2	((uint)0x00000080)	/* Full frame transmitted */
+#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
+#define FEC_ENET_RXF_0	((uint)0x02000000)	/* Full frame received */
+#define FEC_ENET_RXF_1	((uint)0x00000002)	/* Full frame received */
+#define FEC_ENET_RXF_2	((uint)0x00000020)	/* Full frame received */
+#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
+#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
+#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
+#define FEC_ENET_TXF	(FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+#define FEC_ENET_RXF	(FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_TS_AVAIL	((uint)0x00010000)
+#define FEC_ENET_TS_TIMER	((uint)0x00008000)
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+
+#define FEC_VLAN_TAG_LEN	0x04
+#define FEC_ETHTYPE_LEN		0x02
+
+struct fec_enet_priv_tx_q {
+	int index;
+	unsigned char *tx_bounce[TX_RING_SIZE];
+	struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+	dma_addr_t	bd_dma;
+	struct bufdesc	*tx_bd_base;
+	uint tx_ring_size;
+
+	unsigned short tx_stop_threshold;
+	unsigned short tx_wake_threshold;
+
+	struct bufdesc	*cur_tx;
+	struct bufdesc	*dirty_tx;
+	char *tso_hdrs;
+	dma_addr_t tso_hdrs_dma;
+};
+
+struct fec_enet_priv_rx_q {
+	int index;
+	struct sk_buff *rx_skbuff[RX_RING_SIZE];
+
+	dma_addr_t	bd_dma;
+	struct bufdesc	*rx_bd_base;
+	uint rx_ring_size;
+
+	struct bufdesc	*cur_rx;
+};
+
 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors. The
  * cur_rx and cur_tx point to the currently available buffer.
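Each control block above is allocated once per configured ring; the allocation code itself falls outside the hunks quoted in this section. A hedged sketch of what that loop looks like:

	/* Sketch only: one tx control block per configured ring. */
	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq;

		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;
		txq->index = i;
		txq->tx_ring_size = TX_RING_SIZE;
		fep->tx_queue[i] = txq;
	}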
@@ -272,36 +383,28 @@ struct fec_enet_private {
 
 	struct clk *clk_ipg;
 	struct clk *clk_ahb;
+	struct clk *clk_ref;
 	struct clk *clk_enet_out;
 	struct clk *clk_ptp;
 
 	bool ptp_clk_on;
 	struct mutex ptp_clk_mutex;
+	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
 
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
-	unsigned char *tx_bounce[TX_RING_SIZE];
-	struct sk_buff *tx_skbuff[TX_RING_SIZE];
-	struct sk_buff *rx_skbuff[RX_RING_SIZE];
+	struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
+	struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
 
-	/* CPM dual port RAM relative addresses */
-	dma_addr_t	bd_dma;
-	/* Address of Rx and Tx buffers */
-	struct bufdesc	*rx_bd_base;
-	struct bufdesc	*tx_bd_base;
-	/* The next free ring entry */
-	struct bufdesc	*cur_rx, *cur_tx;
-	/* The ring entries to be free()ed */
-	struct bufdesc	*dirty_tx;
+	unsigned int total_tx_ring_size;
+	unsigned int total_rx_ring_size;
 
-	unsigned short bufdesc_size;
-	unsigned short tx_ring_size;
-	unsigned short rx_ring_size;
-	unsigned short tx_stop_threshold;
-	unsigned short tx_wake_threshold;
+	unsigned long work_tx;
+	unsigned long work_rx;
+	unsigned long work_ts;
+	unsigned long work_mdio;
 
-	/* Software TSO */
-	char *tso_hdrs;
-	dma_addr_t tso_hdrs_dma;
+	unsigned short bufdesc_size;
 
 	struct platform_device *pdev;
 
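The work_tx/work_rx bitmaps are filled from the per-ring IEVENT bits defined in the previous hunk. A sketch of that demultiplexing, not the literal patch code: the bit positions are chosen so a for_each_set_bit() walk services ring 1 (class A) first, and FEC_ENET_GET_QUQUE() in fec_main.c maps a bit index back to its ring:

	static void fec_enet_collect_events(struct fec_enet_private *fep,
					    uint int_events)
	{
		if (int_events & FEC_ENET_RXF_1)
			fep->work_rx |= 1 << 0;	/* ring 1, class A */
		if (int_events & FEC_ENET_RXF_2)
			fep->work_rx |= 1 << 1;	/* ring 2, class B */
		if (int_events & FEC_ENET_RXF_0)
			fep->work_rx |= 1 << 2;	/* ring 0, best effort */
		if (int_events & FEC_ENET_TXF_1)
			fep->work_tx |= 1 << 0;
		if (int_events & FEC_ENET_TXF_2)
			fep->work_tx |= 1 << 1;
		if (int_events & FEC_ENET_TXF_0)
			fep->work_tx |= 1 << 2;
	}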
@@ -340,6 +443,9 @@ struct fec_enet_private {
 	int hwts_tx_en;
 	struct delayed_work time_keep;
 	struct regulator *reg_phy;
+
+	unsigned int tx_align;
+	unsigned int rx_align;
 };
 
 void fec_ptp_init(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 89355a719625..8f8e55ea7f85 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -64,14 +64,10 @@
 
 static void set_multicast_list(struct net_device *ndev);
 
-#if defined(CONFIG_ARM)
-#define FEC_ALIGNMENT	0xf
-#else
-#define FEC_ALIGNMENT	0x3
-#endif
-
 #define DRIVER_NAME	"fec"
 
+#define FEC_ENET_GET_QUQUE(_x)	((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
+
 /* Pause frame feild and FIFO threshold */
 #define FEC_ENET_FCE	(1 << 5)
 #define FEC_ENET_RSEM_V	0x84
@@ -104,6 +100,16 @@ static void set_multicast_list(struct net_device *ndev);
  * ENET_TDAR[TDAR].
  */
 #define FEC_QUIRK_ERR006358		(1 << 7)
+/* ENET IP hw AVB
+ *
+ * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
+ * - Two class indicators on receive with configurable priority
+ * - Two class indicators and line speed timer on transmit allowing
+ *   implementation class credit based shapers externally
+ * - Additional DMA registers provisioned to allow managing up to 3
+ *   independent rings
+ */
+#define FEC_QUIRK_HAS_AVB		(1 << 8)
 
 static struct platform_device_id fec_devtype[] = {
 	{
@@ -128,6 +134,12 @@ static struct platform_device_id fec_devtype[] = {
 		.name = "mvf600-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC,
 	}, {
+		.name = "imx6sx-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+				FEC_QUIRK_HAS_AVB,
+	}, {
 		/* sentinel */
 	}
 };
@@ -139,6 +151,7 @@ enum imx_fec_type {
 	IMX28_FEC,
 	IMX6Q_FEC,
 	MVF600_FEC,
+	IMX6SX_FEC,
 };
 
 static const struct of_device_id fec_dt_ids[] = {
@@ -147,6 +160,7 @@ static const struct of_device_id fec_dt_ids[] = {
 	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
 	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
 	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -175,21 +189,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #endif
 #endif /* CONFIG_M5272 */
 
-/* Interrupt events/masks. */
-#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
-#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
-#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
-#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
-#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
-#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
-#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
-#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
-#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
-#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
-
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
-#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
-
 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
  */
 #define PKT_MAXBUF_SIZE		1522
@@ -242,22 +241,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 static int mii_cnt;
 
 static inline
-struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+				      struct fec_enet_private *fep,
+				      int queue_id)
 {
 	struct bufdesc *new_bd = bdp + 1;
 	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
 	struct bufdesc_ex *ex_base;
 	struct bufdesc *base;
 	int ring_size;
 
-	if (bdp >= fep->tx_bd_base) {
-		base = fep->tx_bd_base;
-		ring_size = fep->tx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	if (bdp >= txq->tx_bd_base) {
+		base = txq->tx_bd_base;
+		ring_size = txq->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
 	} else {
-		base = fep->rx_bd_base;
-		ring_size = fep->rx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+		base = rxq->rx_bd_base;
+		ring_size = rxq->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
 	}
 
 	if (fep->bufdesc_ex)
@@ -269,22 +272,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva
 }
 
 static inline
-struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+				      struct fec_enet_private *fep,
+				      int queue_id)
 {
 	struct bufdesc *new_bd = bdp - 1;
 	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
 	struct bufdesc_ex *ex_base;
 	struct bufdesc *base;
 	int ring_size;
 
-	if (bdp >= fep->tx_bd_base) {
-		base = fep->tx_bd_base;
-		ring_size = fep->tx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	if (bdp >= txq->tx_bd_base) {
+		base = txq->tx_bd_base;
+		ring_size = txq->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
 	} else {
-		base = fep->rx_bd_base;
-		ring_size = fep->rx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+		base = rxq->rx_bd_base;
+		ring_size = rxq->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
 	}
 
 	if (fep->bufdesc_ex)
@@ -300,14 +307,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
 	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
 }
 
-static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
+					struct fec_enet_priv_tx_q *txq)
 {
 	int entries;
 
-	entries = ((const char *)fep->dirty_tx -
-		(const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+	entries = ((const char *)txq->dirty_tx -
+		(const char *)txq->cur_tx) / fep->bufdesc_size - 1;
 
-	return entries > 0 ? entries : entries + fep->tx_ring_size;
+	return entries > 0 ? entries : entries + txq->tx_ring_size;
 }
 
 static void *swap_buffer(void *bufaddr, int len)
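The free-count arithmetic deserves a worked example: with a 512-entry ring, if dirty_tx sits one descriptor after cur_tx in memory the pointer difference is 1 descriptor and entries = 1 - 1 = 0, i.e. the ring is full (one slot is always left unused to distinguish full from empty). If cur_tx has instead advanced three descriptors past dirty_tx, the difference is -3, entries = -3 - 1 = -4, and adding tx_ring_size back yields 508 free slots.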
@@ -324,22 +332,26 @@ static void *swap_buffer(void *bufaddr, int len)
 static void fec_dump(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct bufdesc *bdp = fep->tx_bd_base;
-	unsigned int index = 0;
+	struct bufdesc *bdp;
+	struct fec_enet_priv_tx_q *txq;
+	int index = 0;
 
 	netdev_info(ndev, "TX ring dump\n");
 	pr_info("Nr     SC     addr       len  SKB\n");
 
+	txq = fep->tx_queue[0];
+	bdp = txq->tx_bd_base;
+
 	do {
 		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
 			index,
-			bdp == fep->cur_tx ? 'S' : ' ',
-			bdp == fep->dirty_tx ? 'H' : ' ',
+			bdp == txq->cur_tx ? 'S' : ' ',
+			bdp == txq->dirty_tx ? 'H' : ' ',
 			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
-			fep->tx_skbuff[index]);
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+			txq->tx_skbuff[index]);
+		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 		index++;
-	} while (bdp != fep->tx_bd_base);
+	} while (bdp != txq->tx_bd_base);
 }
 
 static inline bool is_ipv4_pkt(struct sk_buff *skb)
@@ -365,14 +377,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 }
 
 static int
-fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
+fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+			     struct sk_buff *skb,
+			     struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
-	struct bufdesc *bdp = fep->cur_tx;
+	struct bufdesc *bdp = txq->cur_tx;
 	struct bufdesc_ex *ebdp;
 	int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned short queue = skb_get_queue_mapping(skb);
 	int frag, frag_len;
 	unsigned short status;
 	unsigned int estatus = 0;
@@ -384,7 +399,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 
 	for (frag = 0; frag < nr_frags; frag++) {
 		this_frag = &skb_shinfo(skb)->frags[frag];
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 		ebdp = (struct bufdesc_ex *)bdp;
 
 		status = bdp->cbd_sc;
@@ -412,11 +427,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 
 		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
 
-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
-		if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+		if (((unsigned long) bufaddr) & fep->tx_align ||
 			id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-			memcpy(fep->tx_bounce[index], bufaddr, frag_len);
-			bufaddr = fep->tx_bounce[index];
+			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+			bufaddr = txq->tx_bounce[index];
 
 			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 				swap_buffer(bufaddr, frag_len);
@@ -436,21 +451,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 		bdp->cbd_sc = status;
 	}
 
-	fep->cur_tx = bdp;
+	txq->cur_tx = bdp;
 
 	return 0;
 
 dma_mapping_error:
-	bdp = fep->cur_tx;
+	bdp = txq->cur_tx;
 	for (i = 0; i < frag; i++) {
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				bdp->cbd_datlen, DMA_TO_DEVICE);
 	}
 	return NETDEV_TX_OK;
 }
 
-static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+				   struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
@@ -461,12 +477,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	dma_addr_t addr;
 	unsigned short status;
 	unsigned short buflen;
+	unsigned short queue;
 	unsigned int estatus = 0;
 	unsigned int index;
 	int entries_free;
 	int ret;
 
-	entries_free = fec_enet_get_free_txdesc_num(fep);
+	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
 		dev_kfree_skb_any(skb);
 		if (net_ratelimit())
@@ -481,7 +498,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	/* Fill in a Tx ring entry */
-	bdp = fep->cur_tx;
+	bdp = txq->cur_tx;
 	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
 
@@ -489,11 +506,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	bufaddr = skb->data;
 	buflen = skb_headlen(skb);
 
-	index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
-	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+	queue = skb_get_queue_mapping(skb);
+	index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+	if (((unsigned long) bufaddr) & fep->tx_align ||
 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-		memcpy(fep->tx_bounce[index], skb->data, buflen);
-		bufaddr = fep->tx_bounce[index];
+		memcpy(txq->tx_bounce[index], skb->data, buflen);
+		bufaddr = txq->tx_bounce[index];
 
 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(bufaddr, buflen);
@@ -509,7 +527,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	if (nr_frags) {
-		ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
 		if (ret)
 			return ret;
 	} else {
@@ -537,10 +555,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 		ebdp->cbd_esc = estatus;
 	}
 
-	last_bdp = fep->cur_tx;
-	index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+	last_bdp = txq->cur_tx;
+	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
 	/* Save skb pointer */
-	fep->tx_skbuff[index] = skb;
+	txq->tx_skbuff[index] = skb;
 
 	bdp->cbd_datlen = buflen;
 	bdp->cbd_bufaddr = addr;
@@ -552,22 +570,23 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	bdp->cbd_sc = status;
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	bdp = fec_enet_get_nextdesc(last_bdp, fep);
+	bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
 
 	skb_tx_timestamp(skb);
 
-	fep->cur_tx = bdp;
+	txq->cur_tx = bdp;
 
 	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
 
 	return 0;
 }
 
 static int
-fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
-			struct bufdesc *bdp, int index, char *data,
-			int size, bool last_tcp, bool is_last)
+fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+			  struct net_device *ndev,
+			  struct bufdesc *bdp, int index, char *data,
+			  int size, bool last_tcp, bool is_last)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
@@ -582,10 +601,10 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
 
 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
 
-	if (((unsigned long) data) & FEC_ALIGNMENT ||
+	if (((unsigned long) data) & fep->tx_align ||
 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-		memcpy(fep->tx_bounce[index], data, size);
-		data = fep->tx_bounce[index];
+		memcpy(txq->tx_bounce[index], data, size);
+		data = txq->tx_bounce[index];
 
 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(data, size);
@@ -624,8 +643,9 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
 }
 
 static int
-fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
-			struct bufdesc *bdp, int index)
+fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+			 struct sk_buff *skb, struct net_device *ndev,
+			 struct bufdesc *bdp, int index)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
@@ -641,12 +661,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
 	status &= ~BD_ENET_TX_STATS;
 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
 
-	bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
-	dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
-	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+	if (((unsigned long)bufaddr) & fep->tx_align ||
 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-		memcpy(fep->tx_bounce[index], skb->data, hdr_len);
-		bufaddr = fep->tx_bounce[index];
+		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
+		bufaddr = txq->tx_bounce[index];
 
 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(bufaddr, hdr_len);
@@ -676,17 +696,20 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
 	return 0;
 }
 
-static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+				   struct sk_buff *skb,
+				   struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int total_len, data_left;
-	struct bufdesc *bdp = fep->cur_tx;
+	struct bufdesc *bdp = txq->cur_tx;
+	unsigned short queue = skb_get_queue_mapping(skb);
 	struct tso_t tso;
 	unsigned int index = 0;
 	int ret;
 
-	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
+	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
 		dev_kfree_skb_any(skb);
 		if (net_ratelimit())
 			netdev_err(ndev, "NOT enough BD for TSO!\n");
@@ -706,14 +729,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 	while (total_len > 0) {
 		char *hdr;
 
-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
 		total_len -= data_left;
 
 		/* prepare packet headers: MAC + IP + TCP */
-		hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
-		ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
+		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
 		if (ret)
 			goto err_release;
 
@@ -721,10 +744,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 			int size;
 
 			size = min_t(int, tso.size, data_left);
-			bdp = fec_enet_get_nextdesc(bdp, fep);
-			index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
-			ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
-							size, size == data_left,
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+			index = fec_enet_get_bd_index(txq->tx_bd_base,
+						      bdp, fep);
+			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
+							bdp, index,
+							tso.data, size,
+							size == data_left,
 							total_len == 0);
 			if (ret)
 				goto err_release;
@@ -733,17 +759,17 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 			tso_build_data(skb, &tso, size);
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 	}
 
 	/* Save skb pointer */
-	fep->tx_skbuff[index] = skb;
+	txq->tx_skbuff[index] = skb;
 
 	skb_tx_timestamp(skb);
-	fep->cur_tx = bdp;
+	txq->cur_tx = bdp;
 
 	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
 
 	return 0;
 
@@ -757,18 +783,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int entries_free;
+	unsigned short queue;
+	struct fec_enet_priv_tx_q *txq;
+	struct netdev_queue *nq;
 	int ret;
 
+	queue = skb_get_queue_mapping(skb);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(ndev, queue);
+
 	if (skb_is_gso(skb))
-		ret = fec_enet_txq_submit_tso(skb, ndev);
+		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
 	else
-		ret = fec_enet_txq_submit_skb(skb, ndev);
+		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
 	if (ret)
 		return ret;
 
-	entries_free = fec_enet_get_free_txdesc_num(fep);
-	if (entries_free <= fep->tx_stop_threshold)
-		netif_stop_queue(ndev);
+	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+	if (entries_free <= txq->tx_stop_threshold)
+		netif_tx_stop_queue(nq);
 
 	return NETDEV_TX_OK;
 }
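For the per-queue stop/wake above to have any effect, the net_device must be created with real tx/rx queue counts. A hedged sketch of the probe-side allocation (standard netdev-core API; the actual probe changes fall outside the quoted hunks, and num_tx_qs/num_rx_qs stand for the counts parsed from the device tree):

	/* Sketch: allocate a multiqueue net_device sized for the
	 * queue counts parsed from the device tree.
	 */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
				  num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;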
@@ -778,46 +811,111 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 static void fec_enet_bd_init(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *bdp;
 	unsigned int i;
+	unsigned int q;
 
-	/* Initialize the receive buffer descriptors. */
-	bdp = fep->rx_bd_base;
-	for (i = 0; i < fep->rx_ring_size; i++) {
+	for (q = 0; q < fep->num_rx_queues; q++) {
+		/* Initialize the receive buffer descriptors. */
+		rxq = fep->rx_queue[q];
+		bdp = rxq->rx_bd_base;
 
-		/* Initialize the BD for every fragment in the page. */
-		if (bdp->cbd_bufaddr)
-			bdp->cbd_sc = BD_ENET_RX_EMPTY;
-		else
+		for (i = 0; i < rxq->rx_ring_size; i++) {
+
+			/* Initialize the BD for every fragment in the page. */
+			if (bdp->cbd_bufaddr)
+				bdp->cbd_sc = BD_ENET_RX_EMPTY;
+			else
+				bdp->cbd_sc = 0;
+			bdp = fec_enet_get_nextdesc(bdp, fep, q);
+		}
+
+		/* Set the last buffer to wrap */
+		bdp = fec_enet_get_prevdesc(bdp, fep, q);
+		bdp->cbd_sc |= BD_SC_WRAP;
+
+		rxq->cur_rx = rxq->rx_bd_base;
+	}
+
+	for (q = 0; q < fep->num_tx_queues; q++) {
+		/* ...and the same for transmit */
+		txq = fep->tx_queue[q];
+		bdp = txq->tx_bd_base;
+		txq->cur_tx = bdp;
+
+		for (i = 0; i < txq->tx_ring_size; i++) {
+			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+			if (txq->tx_skbuff[i]) {
+				dev_kfree_skb_any(txq->tx_skbuff[i]);
+				txq->tx_skbuff[i] = NULL;
+			}
+			bdp->cbd_bufaddr = 0;
+			bdp = fec_enet_get_nextdesc(bdp, fep, q);
+		}
+
+		/* Set the last buffer to wrap */
+		bdp = fec_enet_get_prevdesc(bdp, fep, q);
+		bdp->cbd_sc |= BD_SC_WRAP;
+		txq->dirty_tx = bdp;
 	}
+}
 
-	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
-	bdp->cbd_sc |= BD_SC_WRAP;
+static void fec_enet_active_rxring(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int i;
+
+	for (i = 0; i < fep->num_rx_queues; i++)
+		writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+}
+
+static void fec_enet_enable_ring(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;
+	int i;
 
-	fep->cur_rx = fep->rx_bd_base;
+	for (i = 0; i < fep->num_rx_queues; i++) {
+		rxq = fep->rx_queue[i];
+		writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
 
-	/* ...and the same for transmit */
-	bdp = fep->tx_bd_base;
-	fep->cur_tx = bdp;
-	for (i = 0; i < fep->tx_ring_size; i++) {
+		/* enable DMA1/2 */
+		if (i)
+			writel(RCMR_MATCHEN | RCMR_CMP(i),
+			       fep->hwp + FEC_RCMR(i));
+	}
 
-		/* Initialize the BD for every fragment in the page. */
-		bdp->cbd_sc = 0;
-		if (fep->tx_skbuff[i]) {
-			dev_kfree_skb_any(fep->tx_skbuff[i]);
-			fep->tx_skbuff[i] = NULL;
-		}
-		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep);
-	}
+	for (i = 0; i < fep->num_tx_queues; i++) {
+		txq = fep->tx_queue[i];
+		writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+
+		/* enable DMA1/2 */
+		if (i)
+			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
+			       fep->hwp + FEC_DMA_CFG(i));
+	}
+}
 
-	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
-	bdp->cbd_sc |= BD_SC_WRAP;
-	fep->dirty_tx = bdp;
+static void fec_enet_reset_skb(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_enet_priv_tx_q *txq;
+	int i, j;
+
+	for (i = 0; i < fep->num_tx_queues; i++) {
+		txq = fep->tx_queue[i];
+
+		for (j = 0; j < txq->tx_ring_size; j++) {
+			if (txq->tx_skbuff[j]) {
+				dev_kfree_skb_any(txq->tx_skbuff[j]);
+				txq->tx_skbuff[j] = NULL;
+			}
+		}
+	}
 }
 
 /*
@@ -831,15 +929,21 @@ fec_restart(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
-	int i;
 	u32 val;
 	u32 temp_mac[2];
 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
 	u32 ecntl = 0x2; /* ETHEREN */
 
-	/* Whack a reset.  We should wait for this. */
-	writel(1, fep->hwp + FEC_ECNTRL);
-	udelay(10);
+	/* Whack a reset.  We should wait for this.
+	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+	 * instead of reset MAC itself.
+	 */
+	if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+		writel(0, fep->hwp + FEC_ECNTRL);
+	} else {
+		writel(1, fep->hwp + FEC_ECNTRL);
+		udelay(10);
+	}
 
 	/*
 	 * enet-mac reset will reset mac address registers too,
@@ -859,22 +963,10 @@ fec_restart(struct net_device *ndev)
 
 	fec_enet_bd_init(ndev);
 
-	/* Set receive and transmit descriptor base. */
-	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
-	if (fep->bufdesc_ex)
-		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
-	else
-		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
-
+	fec_enet_enable_ring(ndev);
 
-	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
-		if (fep->tx_skbuff[i]) {
-			dev_kfree_skb_any(fep->tx_skbuff[i]);
-			fep->tx_skbuff[i] = NULL;
-		}
-	}
+	/* Reset tx SKB buffers. */
+	fec_enet_reset_skb(ndev);
 
 	/* Enable MII mode */
 	if (fep->full_duplex == DUPLEX_FULL) {
@@ -996,7 +1088,7 @@ fec_restart(struct net_device *ndev)
 
 	/* And last, enable the transmit and receive processing */
 	writel(ecntl, fep->hwp + FEC_ECNTRL);
-	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+	fec_enet_active_rxring(ndev);
 
 	if (fep->bufdesc_ex)
 		fec_ptp_start_cyclecounter(ndev);
@@ -1021,9 +1113,16 @@ fec_stop(struct net_device *ndev)
 			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
 	}
 
-	/* Whack a reset.  We should wait for this. */
-	writel(1, fep->hwp + FEC_ECNTRL);
-	udelay(10);
+	/* Whack a reset.  We should wait for this.
+	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+	 * instead of reset MAC itself.
+	 */
+	if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+		writel(0, fep->hwp + FEC_ECNTRL);
+	} else {
+		writel(1, fep->hwp + FEC_ECNTRL);
+		udelay(10);
+	}
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
@@ -1081,37 +1180,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 }
 
 static void
-fec_enet_tx(struct net_device *ndev)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct fec_enet_private *fep;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
+	struct fec_enet_priv_tx_q *txq;
+	struct netdev_queue *nq;
 	int	index = 0;
 	int	entries_free;
 
 	fep = netdev_priv(ndev);
-	bdp = fep->dirty_tx;
 
+	queue_id = FEC_ENET_GET_QUQUE(queue_id);
+
+	txq = fep->tx_queue[queue_id];
 	/* get next bdp of dirty_tx */
-	bdp = fec_enet_get_nextdesc(bdp, fep);
+	nq = netdev_get_tx_queue(ndev, queue_id);
+	bdp = txq->dirty_tx;
+
+	/* get next bdp of dirty_tx */
+	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 
 		/* current queue is empty */
-		if (bdp == fep->cur_tx)
+		if (bdp == txq->cur_tx)
 			break;
 
-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 
-		skb = fep->tx_skbuff[index];
-		fep->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
+		skb = txq->tx_skbuff[index];
+		txq->tx_skbuff[index] = NULL;
+		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
 			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 					bdp->cbd_datlen, DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
 		if (!skb) {
-			bdp = fec_enet_get_nextdesc(bdp, fep);
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 			continue;
 		}
 
@@ -1153,23 +1260,37 @@ fec_enet_tx(struct net_device *ndev) | |||
1153 | /* Free the sk buffer associated with this last transmit */ | 1260 | /* Free the sk buffer associated with this last transmit */ |
1154 | dev_kfree_skb_any(skb); | 1261 | dev_kfree_skb_any(skb); |
1155 | 1262 | ||
1156 | fep->dirty_tx = bdp; | 1263 | txq->dirty_tx = bdp; |
1157 | 1264 | ||
1158 | /* Update pointer to next buffer descriptor to be transmitted */ | 1265 | /* Update pointer to next buffer descriptor to be transmitted */ |
1159 | bdp = fec_enet_get_nextdesc(bdp, fep); | 1266 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); |
1160 | 1267 | ||
1161 | /* Since we have freed up a buffer, the ring is no longer full | 1268 | /* Since we have freed up a buffer, the ring is no longer full |
1162 | */ | 1269 | */ |
1163 | if (netif_queue_stopped(ndev)) { | 1270 | if (netif_queue_stopped(ndev)) { |
1164 | entries_free = fec_enet_get_free_txdesc_num(fep); | 1271 | entries_free = fec_enet_get_free_txdesc_num(fep, txq); |
1165 | if (entries_free >= fep->tx_wake_threshold) | 1272 | if (entries_free >= txq->tx_wake_threshold) |
1166 | netif_wake_queue(ndev); | 1273 | netif_tx_wake_queue(nq); |
1167 | } | 1274 | } |
1168 | } | 1275 | } |
1169 | 1276 | ||
1170 | /* ERR006538: Keep the transmitter going */ | 1277 | /* ERR006538: Keep the transmitter going */ |
1171 | if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0) | 1278 | if (bdp != txq->cur_tx && |
1172 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); | 1279 | readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) |
1280 | writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); | ||
1281 | } | ||
1282 | |||
1283 | static void | ||
1284 | fec_enet_tx(struct net_device *ndev) | ||
1285 | { | ||
1286 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
1287 | u16 queue_id; | ||
1288 | /* First process the class A queue, then the class B and best-effort queues */ | ||
1289 | for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { | ||
1290 | clear_bit(queue_id, &fep->work_tx); | ||
1291 | fec_enet_tx_queue(ndev, queue_id); | ||
1292 | } | ||
1293 | return; | ||
1173 | } | 1294 | } |
1174 | 1295 | ||
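The tail of fec_enet_tx_queue() above re-kicks the transmit DMA as a workaround for erratum ERR006538 (per the in-tree comment, "Keep the transmitter going"). A hedged sketch of just the rearm condition; the register pointer and the ring cursors are stand-ins for the driver's state:

```c
#include <stdint.h>

/* After reaping completions: if descriptors are still queued but the
 * transmit-descriptor-active register reads zero, writing any value
 * re-arms the DMA engine. */
static void rearm_tx_dma(volatile uint32_t *tdar,
			 const void *next_to_clean, const void *next_to_use)
{
	if (next_to_clean != next_to_use && *tdar == 0)
		*tdar = 0;	/* the write itself restarts descriptor fetch */
}
```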
1175 | /* During a receive, the cur_rx points to the current incoming buffer. | 1296 | /* During a receive, the cur_rx points to the current incoming buffer. |
@@ -1178,11 +1299,12 @@ fec_enet_tx(struct net_device *ndev) | |||
1178 | * effectively tossing the packet. | 1299 | * effectively tossing the packet. |
1179 | */ | 1300 | */ |
1180 | static int | 1301 | static int |
1181 | fec_enet_rx(struct net_device *ndev, int budget) | 1302 | fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) |
1182 | { | 1303 | { |
1183 | struct fec_enet_private *fep = netdev_priv(ndev); | 1304 | struct fec_enet_private *fep = netdev_priv(ndev); |
1184 | const struct platform_device_id *id_entry = | 1305 | const struct platform_device_id *id_entry = |
1185 | platform_get_device_id(fep->pdev); | 1306 | platform_get_device_id(fep->pdev); |
1307 | struct fec_enet_priv_rx_q *rxq; | ||
1186 | struct bufdesc *bdp; | 1308 | struct bufdesc *bdp; |
1187 | unsigned short status; | 1309 | unsigned short status; |
1188 | struct sk_buff *skb; | 1310 | struct sk_buff *skb; |
@@ -1197,11 +1319,13 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
1197 | #ifdef CONFIG_M532x | 1319 | #ifdef CONFIG_M532x |
1198 | flush_cache_all(); | 1320 | flush_cache_all(); |
1199 | #endif | 1321 | #endif |
1322 | queue_id = FEC_ENET_GET_QUQUE(queue_id); | ||
1323 | rxq = fep->rx_queue[queue_id]; | ||
1200 | 1324 | ||
1201 | /* First, grab all of the stats for the incoming packet. | 1325 | /* First, grab all of the stats for the incoming packet. |
1202 | * These get messed up if we get called due to a busy condition. | 1326 | * These get messed up if we get called due to a busy condition. |
1203 | */ | 1327 | */ |
1204 | bdp = fep->cur_rx; | 1328 | bdp = rxq->cur_rx; |
1205 | 1329 | ||
1206 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | 1330 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { |
1207 | 1331 | ||
@@ -1215,7 +1339,6 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
1215 | if ((status & BD_ENET_RX_LAST) == 0) | 1339 | if ((status & BD_ENET_RX_LAST) == 0) |
1216 | netdev_err(ndev, "rcv is not +last\n"); | 1340 | netdev_err(ndev, "rcv is not +last\n"); |
1217 | 1341 | ||
1218 | writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); | ||
1219 | 1342 | ||
1220 | /* Check for errors. */ | 1343 | /* Check for errors. */ |
1221 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | | 1344 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | |
@@ -1248,8 +1371,8 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
1248 | pkt_len = bdp->cbd_datlen; | 1371 | pkt_len = bdp->cbd_datlen; |
1249 | ndev->stats.rx_bytes += pkt_len; | 1372 | ndev->stats.rx_bytes += pkt_len; |
1250 | 1373 | ||
1251 | index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep); | 1374 | index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); |
1252 | data = fep->rx_skbuff[index]->data; | 1375 | data = rxq->rx_skbuff[index]->data; |
1253 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, | 1376 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, |
1254 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 1377 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); |
1255 | 1378 | ||
@@ -1264,7 +1387,7 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
1264 | /* If this is a VLAN packet remove the VLAN Tag */ | 1387 | /* If this is a VLAN packet remove the VLAN Tag */ |
1265 | vlan_packet_rcvd = false; | 1388 | vlan_packet_rcvd = false; |
1266 | if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && | 1389 | if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
1267 | fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { | 1390 | fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { |
1268 | /* Push and remove the vlan tag */ | 1391 | /* Push and remove the vlan tag */ |
1269 | struct vlan_hdr *vlan_header = | 1392 | struct vlan_hdr *vlan_header = |
1270 | (struct vlan_hdr *) (data + ETH_HLEN); | 1393 | (struct vlan_hdr *) (data + ETH_HLEN); |
@@ -1292,7 +1415,7 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
1292 | skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); | 1415 | skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); |
1293 | if (vlan_packet_rcvd) | 1416 | if (vlan_packet_rcvd) |
1294 | payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; | 1417 | payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; |
1295 | skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), | 1418 | skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), |
1296 | data + payload_offset, | 1419 | data + payload_offset, |
1297 | pkt_len - 4 - (2 * ETH_ALEN)); | 1420 | pkt_len - 4 - (2 * ETH_ALEN)); |
1298 | 1421 | ||
@@ -1341,19 +1464,56 @@ rx_processing_done: | |||
1341 | } | 1464 | } |
1342 | 1465 | ||
1343 | /* Update BD pointer to next entry */ | 1466 | /* Update BD pointer to next entry */ |
1344 | bdp = fec_enet_get_nextdesc(bdp, fep); | 1467 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); |
1345 | 1468 | ||
1346 | /* Doing this here will keep the FEC running while we process | 1469 | /* Doing this here will keep the FEC running while we process |
1347 | * incoming frames. On a heavily loaded network, we should be | 1470 | * incoming frames. On a heavily loaded network, we should be |
1348 | * able to keep up at the expense of system resources. | 1471 | * able to keep up at the expense of system resources. |
1349 | */ | 1472 | */ |
1350 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); | 1473 | writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); |
1351 | } | 1474 | } |
1352 | fep->cur_rx = bdp; | 1475 | rxq->cur_rx = bdp; |
1476 | return pkt_received; | ||
1477 | } | ||
1478 | |||
1479 | static int | ||
1480 | fec_enet_rx(struct net_device *ndev, int budget) | ||
1481 | { | ||
1482 | int pkt_received = 0; | ||
1483 | u16 queue_id; | ||
1484 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
1353 | 1485 | ||
1486 | for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { | ||
1487 | clear_bit(queue_id, &fep->work_rx); | ||
1488 | pkt_received += fec_enet_rx_queue(ndev, | ||
1489 | budget - pkt_received, queue_id); | ||
1490 | } | ||
1354 | return pkt_received; | 1491 | return pkt_received; |
1355 | } | 1492 | } |
1356 | 1493 | ||
1494 | static bool | ||
1495 | fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) | ||
1496 | { | ||
1497 | if (int_events == 0) | ||
1498 | return false; | ||
1499 | |||
1500 | if (int_events & FEC_ENET_RXF) | ||
1501 | fep->work_rx |= (1 << 2); | ||
1502 | if (int_events & FEC_ENET_RXF_1) | ||
1503 | fep->work_rx |= (1 << 0); | ||
1504 | if (int_events & FEC_ENET_RXF_2) | ||
1505 | fep->work_rx |= (1 << 1); | ||
1506 | |||
1507 | if (int_events & FEC_ENET_TXF) | ||
1508 | fep->work_tx |= (1 << 2); | ||
1509 | if (int_events & FEC_ENET_TXF_1) | ||
1510 | fep->work_tx |= (1 << 0); | ||
1511 | if (int_events & FEC_ENET_TXF_2) | ||
1512 | fep->work_tx |= (1 << 1); | ||
1513 | |||
1514 | return true; | ||
1515 | } | ||
1516 | |||
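fec_enet_collect_events() deliberately inverts the queue-to-bit mapping: hardware queue 1 (AVB class A) lands on bit 0, queue 2 (class B) on bit 1, and queue 0 (best effort) on bit 2, so the ascending bit scan in fec_enet_tx()/fec_enet_rx() drains class A traffic first. A stand-alone demo of that ordering, with the mapping table taken straight from the assignments above:

```c
#include <stdio.h>

int main(void)
{
	/* bit 0 -> hw queue 1 (class A), bit 1 -> queue 2, bit 2 -> queue 0 */
	static const int bit_to_queue[] = { 1, 2, 0 };
	/* pretend RXF (queue 0) and RXF_1 (queue 1) both fired */
	unsigned long work = (1UL << 2) | (1UL << 0);

	for (int bit = 0; bit < 3; bit++)
		if (work & (1UL << bit))
			printf("service hw queue %d\n", bit_to_queue[bit]);
	return 0;	/* prints queue 1 (class A) before queue 0 */
}
```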
1357 | static irqreturn_t | 1517 | static irqreturn_t |
1358 | fec_enet_interrupt(int irq, void *dev_id) | 1518 | fec_enet_interrupt(int irq, void *dev_id) |
1359 | { | 1519 | { |
@@ -1365,6 +1525,7 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1365 | 1525 | ||
1366 | int_events = readl(fep->hwp + FEC_IEVENT); | 1526 | int_events = readl(fep->hwp + FEC_IEVENT); |
1367 | writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); | 1527 | writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); |
1528 | fec_enet_collect_events(fep, int_events); | ||
1368 | 1529 | ||
1369 | if (int_events & napi_mask) { | 1530 | if (int_events & napi_mask) { |
1370 | ret = IRQ_HANDLED; | 1531 | ret = IRQ_HANDLED; |
@@ -1621,6 +1782,11 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
1621 | } | 1782 | } |
1622 | mutex_unlock(&fep->ptp_clk_mutex); | 1783 | mutex_unlock(&fep->ptp_clk_mutex); |
1623 | } | 1784 | } |
1785 | if (fep->clk_ref) { | ||
1786 | ret = clk_prepare_enable(fep->clk_ref); | ||
1787 | if (ret) | ||
1788 | goto failed_clk_ref; | ||
1789 | } | ||
1624 | } else { | 1790 | } else { |
1625 | clk_disable_unprepare(fep->clk_ahb); | 1791 | clk_disable_unprepare(fep->clk_ahb); |
1626 | clk_disable_unprepare(fep->clk_ipg); | 1792 | clk_disable_unprepare(fep->clk_ipg); |
@@ -1632,9 +1798,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
1632 | fep->ptp_clk_on = false; | 1798 | fep->ptp_clk_on = false; |
1633 | mutex_unlock(&fep->ptp_clk_mutex); | 1799 | mutex_unlock(&fep->ptp_clk_mutex); |
1634 | } | 1800 | } |
1801 | if (fep->clk_ref) | ||
1802 | clk_disable_unprepare(fep->clk_ref); | ||
1635 | } | 1803 | } |
1636 | 1804 | ||
1637 | return 0; | 1805 | return 0; |
1806 | |||
1807 | failed_clk_ref: | ||
1808 | if (fep->clk_ref) | ||
1809 | clk_disable_unprepare(fep->clk_ref); | ||
1638 | failed_clk_ptp: | 1810 | failed_clk_ptp: |
1639 | if (fep->clk_enet_out) | 1811 | if (fep->clk_enet_out) |
1640 | clk_disable_unprepare(fep->clk_enet_out); | 1812 | clk_disable_unprepare(fep->clk_enet_out); |
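The unwind labels above follow the usual goto-cleanup convention, with one wrinkle: the failed_clk_ref path releases clk_ref even though its clk_prepare_enable() just failed, while the previously enabled PTP clock stays running. A minimal kernel-style sketch of the balanced convention, where each label backs out only what was enabled before the failing step; clk_a/clk_b and the function name are hypothetical:

```c
#include <linux/clk.h>

static int demo_enable_clocks(struct clk *clk_a, struct clk *clk_b)
{
	int ret;

	ret = clk_prepare_enable(clk_a);
	if (ret)
		return ret;		/* nothing enabled yet, nothing to undo */

	ret = clk_prepare_enable(clk_b);
	if (ret)
		goto err_clk_b;		/* clk_b is NOT running; undo clk_a only */

	return 0;

err_clk_b:
	clk_disable_unprepare(clk_a);
	return ret;
}
```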
@@ -2105,46 +2277,140 @@ static void fec_enet_free_buffers(struct net_device *ndev) | |||
2105 | unsigned int i; | 2277 | unsigned int i; |
2106 | struct sk_buff *skb; | 2278 | struct sk_buff *skb; |
2107 | struct bufdesc *bdp; | 2279 | struct bufdesc *bdp; |
2280 | struct fec_enet_priv_tx_q *txq; | ||
2281 | struct fec_enet_priv_rx_q *rxq; | ||
2282 | unsigned int q; | ||
2283 | |||
2284 | for (q = 0; q < fep->num_rx_queues; q++) { | ||
2285 | rxq = fep->rx_queue[q]; | ||
2286 | bdp = rxq->rx_bd_base; | ||
2287 | for (i = 0; i < rxq->rx_ring_size; i++) { | ||
2288 | skb = rxq->rx_skbuff[i]; | ||
2289 | rxq->rx_skbuff[i] = NULL; | ||
2290 | if (skb) { | ||
2291 | dma_unmap_single(&fep->pdev->dev, | ||
2292 | bdp->cbd_bufaddr, | ||
2293 | FEC_ENET_RX_FRSIZE, | ||
2294 | DMA_FROM_DEVICE); | ||
2295 | dev_kfree_skb(skb); | ||
2296 | } | ||
2297 | bdp = fec_enet_get_nextdesc(bdp, fep, q); | ||
2298 | } | ||
2299 | } | ||
2108 | 2300 | ||
2109 | bdp = fep->rx_bd_base; | 2301 | for (q = 0; q < fep->num_tx_queues; q++) { |
2110 | for (i = 0; i < fep->rx_ring_size; i++) { | 2302 | txq = fep->tx_queue[q]; |
2111 | skb = fep->rx_skbuff[i]; | 2303 | bdp = txq->tx_bd_base; |
2112 | fep->rx_skbuff[i] = NULL; | 2304 | for (i = 0; i < txq->tx_ring_size; i++) { |
2113 | if (skb) { | 2305 | kfree(txq->tx_bounce[i]); |
2114 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | 2306 | txq->tx_bounce[i] = NULL; |
2115 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 2307 | skb = txq->tx_skbuff[i]; |
2308 | txq->tx_skbuff[i] = NULL; | ||
2116 | dev_kfree_skb(skb); | 2309 | dev_kfree_skb(skb); |
2117 | } | 2310 | } |
2118 | bdp = fec_enet_get_nextdesc(bdp, fep); | ||
2119 | } | 2311 | } |
2312 | } | ||
2313 | |||
2314 | static void fec_enet_free_queue(struct net_device *ndev) | ||
2315 | { | ||
2316 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
2317 | int i; | ||
2318 | struct fec_enet_priv_tx_q *txq; | ||
2319 | |||
2320 | for (i = 0; i < fep->num_tx_queues; i++) | ||
2321 | if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { | ||
2322 | txq = fep->tx_queue[i]; | ||
2323 | dma_free_coherent(NULL, | ||
2324 | txq->tx_ring_size * TSO_HEADER_SIZE, | ||
2325 | txq->tso_hdrs, | ||
2326 | txq->tso_hdrs_dma); | ||
2327 | } | ||
2328 | |||
2329 | for (i = 0; i < fep->num_rx_queues; i++) | ||
2330 | if (fep->rx_queue[i]) | ||
2331 | kfree(fep->rx_queue[i]); | ||
2332 | |||
2333 | for (i = 0; i < fep->num_tx_queues; i++) | ||
2334 | if (fep->tx_queue[i]) | ||
2335 | kfree(fep->tx_queue[i]); | ||
2336 | } | ||
2337 | |||
2338 | static int fec_enet_alloc_queue(struct net_device *ndev) | ||
2339 | { | ||
2340 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
2341 | int i; | ||
2342 | int ret = 0; | ||
2343 | struct fec_enet_priv_tx_q *txq; | ||
2344 | |||
2345 | for (i = 0; i < fep->num_tx_queues; i++) { | ||
2346 | txq = kzalloc(sizeof(*txq), GFP_KERNEL); | ||
2347 | if (!txq) { | ||
2348 | ret = -ENOMEM; | ||
2349 | goto alloc_failed; | ||
2350 | } | ||
2351 | |||
2352 | fep->tx_queue[i] = txq; | ||
2353 | txq->tx_ring_size = TX_RING_SIZE; | ||
2354 | fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; | ||
2355 | |||
2356 | txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; | ||
2357 | txq->tx_wake_threshold = | ||
2358 | (txq->tx_ring_size - txq->tx_stop_threshold) / 2; | ||
2359 | |||
2360 | txq->tso_hdrs = dma_alloc_coherent(NULL, | ||
2361 | txq->tx_ring_size * TSO_HEADER_SIZE, | ||
2362 | &txq->tso_hdrs_dma, | ||
2363 | GFP_KERNEL); | ||
2364 | if (!txq->tso_hdrs) { | ||
2365 | ret = -ENOMEM; | ||
2366 | goto alloc_failed; | ||
2367 | } | ||
2368 | } | ||
2369 | |||
2370 | for (i = 0; i < fep->num_rx_queues; i++) { | ||
2371 | fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), | ||
2372 | GFP_KERNEL); | ||
2373 | if (!fep->rx_queue[i]) { | ||
2374 | ret = -ENOMEM; | ||
2375 | goto alloc_failed; | ||
2376 | } | ||
2120 | 2377 | ||
2121 | bdp = fep->tx_bd_base; | 2378 | fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; |
2122 | for (i = 0; i < fep->tx_ring_size; i++) { | 2379 | fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; |
2123 | kfree(fep->tx_bounce[i]); | ||
2124 | fep->tx_bounce[i] = NULL; | ||
2125 | skb = fep->tx_skbuff[i]; | ||
2126 | fep->tx_skbuff[i] = NULL; | ||
2127 | dev_kfree_skb(skb); | ||
2128 | } | 2380 | } |
2381 | return ret; | ||
2382 | |||
2383 | alloc_failed: | ||
2384 | fec_enet_free_queue(ndev); | ||
2385 | return ret; | ||
2129 | } | 2386 | } |
2130 | 2387 | ||
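fec_enet_alloc_queue() derives each queue's wake threshold from the ring size and the stop threshold. A worked instance of that arithmetic with assumed inputs (the real TX_RING_SIZE and FEC_MAX_SKB_DESCS values live in the driver headers):

```c
#include <stdio.h>

int main(void)
{
	unsigned int tx_ring_size = 512;	/* assumed TX_RING_SIZE */
	unsigned int tx_stop_threshold = 128;	/* assumed FEC_MAX_SKB_DESCS */
	unsigned int tx_wake_threshold =
		(tx_ring_size - tx_stop_threshold) / 2;

	/* With these sample numbers the wake mark (192 free) clears the stop
	 * mark (128 free), so a just-woken queue is not re-stopped at once. */
	printf("stop below %u free, wake at >= %u free\n",
	       tx_stop_threshold, tx_wake_threshold);
	return 0;
}
```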
2131 | static int fec_enet_alloc_buffers(struct net_device *ndev) | 2388 | static int |
2389 | fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) | ||
2132 | { | 2390 | { |
2133 | struct fec_enet_private *fep = netdev_priv(ndev); | 2391 | struct fec_enet_private *fep = netdev_priv(ndev); |
2134 | unsigned int i; | 2392 | unsigned int i; |
2135 | struct sk_buff *skb; | 2393 | struct sk_buff *skb; |
2136 | struct bufdesc *bdp; | 2394 | struct bufdesc *bdp; |
2395 | struct fec_enet_priv_rx_q *rxq; | ||
2396 | unsigned int off; | ||
2137 | 2397 | ||
2138 | bdp = fep->rx_bd_base; | 2398 | rxq = fep->rx_queue[queue]; |
2139 | for (i = 0; i < fep->rx_ring_size; i++) { | 2399 | bdp = rxq->rx_bd_base; |
2400 | for (i = 0; i < rxq->rx_ring_size; i++) { | ||
2140 | dma_addr_t addr; | 2401 | dma_addr_t addr; |
2141 | 2402 | ||
2142 | skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); | 2403 | skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); |
2143 | if (!skb) | 2404 | if (!skb) |
2144 | goto err_alloc; | 2405 | goto err_alloc; |
2145 | 2406 | ||
2407 | off = ((unsigned long)skb->data) & fep->rx_align; | ||
2408 | if (off) | ||
2409 | skb_reserve(skb, fep->rx_align + 1 - off); | ||
2410 | |||
2146 | addr = dma_map_single(&fep->pdev->dev, skb->data, | 2411 | addr = dma_map_single(&fep->pdev->dev, skb->data, |
2147 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 2412 | FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); |
2413 | |||
2148 | if (dma_mapping_error(&fep->pdev->dev, addr)) { | 2414 | if (dma_mapping_error(&fep->pdev->dev, addr)) { |
2149 | dev_kfree_skb(skb); | 2415 | dev_kfree_skb(skb); |
2150 | if (net_ratelimit()) | 2416 | if (net_ratelimit()) |
@@ -2152,7 +2418,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) | |||
2152 | goto err_alloc; | 2418 | goto err_alloc; |
2153 | } | 2419 | } |
2154 | 2420 | ||
2155 | fep->rx_skbuff[i] = skb; | 2421 | rxq->rx_skbuff[i] = skb; |
2156 | bdp->cbd_bufaddr = addr; | 2422 | bdp->cbd_bufaddr = addr; |
2157 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | 2423 | bdp->cbd_sc = BD_ENET_RX_EMPTY; |
2158 | 2424 | ||
@@ -2161,17 +2427,32 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) | |||
2161 | ebdp->cbd_esc = BD_ENET_RX_INT; | 2427 | ebdp->cbd_esc = BD_ENET_RX_INT; |
2162 | } | 2428 | } |
2163 | 2429 | ||
2164 | bdp = fec_enet_get_nextdesc(bdp, fep); | 2430 | bdp = fec_enet_get_nextdesc(bdp, fep, queue); |
2165 | } | 2431 | } |
2166 | 2432 | ||
2167 | /* Set the last buffer to wrap. */ | 2433 | /* Set the last buffer to wrap. */ |
2168 | bdp = fec_enet_get_prevdesc(bdp, fep); | 2434 | bdp = fec_enet_get_prevdesc(bdp, fep, queue); |
2169 | bdp->cbd_sc |= BD_SC_WRAP; | 2435 | bdp->cbd_sc |= BD_SC_WRAP; |
2436 | return 0; | ||
2437 | |||
2438 | err_alloc: | ||
2439 | fec_enet_free_buffers(ndev); | ||
2440 | return -ENOMEM; | ||
2441 | } | ||
2170 | 2442 | ||
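The skb_reserve() fix-up above bumps the buffer start to the next boundary implied by rx_align (0xf for the 16-byte ARM case, 0x3f once the AVB quirk raises it to a 64-byte requirement), and the DMA mapping length shrinks by the same mask. A stand-alone check of the arithmetic, with an assumed buffer address:

```c
#include <stdio.h>
#include <stdint.h>

/* rx_align is a mask: alignment = rx_align + 1 bytes. */
static unsigned int align_reserve(uintptr_t data, unsigned int rx_align)
{
	unsigned int off = data & rx_align;	/* misalignment in bytes */
	return off ? rx_align + 1 - off : 0;	/* bytes to skb_reserve() */
}

int main(void)
{
	uintptr_t data = 0x1004;		/* assumed skb->data address */
	unsigned int reserve = align_reserve(data, 0x3f);

	printf("reserve %u bytes -> 0x%lx\n", reserve,
	       (unsigned long)(data + reserve));	/* prints ... 0x1040 */
	return 0;
}
```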
2171 | bdp = fep->tx_bd_base; | 2443 | static int |
2172 | for (i = 0; i < fep->tx_ring_size; i++) { | 2444 | fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) |
2173 | fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); | 2445 | { |
2174 | if (!fep->tx_bounce[i]) | 2446 | struct fec_enet_private *fep = netdev_priv(ndev); |
2447 | unsigned int i; | ||
2448 | struct bufdesc *bdp; | ||
2449 | struct fec_enet_priv_tx_q *txq; | ||
2450 | |||
2451 | txq = fep->tx_queue[queue]; | ||
2452 | bdp = txq->tx_bd_base; | ||
2453 | for (i = 0; i < txq->tx_ring_size; i++) { | ||
2454 | txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); | ||
2455 | if (!txq->tx_bounce[i]) | ||
2175 | goto err_alloc; | 2456 | goto err_alloc; |
2176 | 2457 | ||
2177 | bdp->cbd_sc = 0; | 2458 | bdp->cbd_sc = 0; |
@@ -2182,11 +2463,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) | |||
2182 | ebdp->cbd_esc = BD_ENET_TX_INT; | 2463 | ebdp->cbd_esc = BD_ENET_TX_INT; |
2183 | } | 2464 | } |
2184 | 2465 | ||
2185 | bdp = fec_enet_get_nextdesc(bdp, fep); | 2466 | bdp = fec_enet_get_nextdesc(bdp, fep, queue); |
2186 | } | 2467 | } |
2187 | 2468 | ||
2188 | /* Set the last buffer to wrap. */ | 2469 | /* Set the last buffer to wrap. */ |
2189 | bdp = fec_enet_get_prevdesc(bdp, fep); | 2470 | bdp = fec_enet_get_prevdesc(bdp, fep, queue); |
2190 | bdp->cbd_sc |= BD_SC_WRAP; | 2471 | bdp->cbd_sc |= BD_SC_WRAP; |
2191 | 2472 | ||
2192 | return 0; | 2473 | return 0; |
@@ -2196,6 +2477,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) | |||
2196 | return -ENOMEM; | 2477 | return -ENOMEM; |
2197 | } | 2478 | } |
2198 | 2479 | ||
2480 | static int fec_enet_alloc_buffers(struct net_device *ndev) | ||
2481 | { | ||
2482 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
2483 | unsigned int i; | ||
2484 | |||
2485 | for (i = 0; i < fep->num_rx_queues; i++) | ||
2486 | if (fec_enet_alloc_rxq_buffers(ndev, i)) | ||
2487 | return -ENOMEM; | ||
2488 | |||
2489 | for (i = 0; i < fep->num_tx_queues; i++) | ||
2490 | if (fec_enet_alloc_txq_buffers(ndev, i)) | ||
2491 | return -ENOMEM; | ||
2492 | return 0; | ||
2493 | } | ||
2494 | |||
2199 | static int | 2495 | static int |
2200 | fec_enet_open(struct net_device *ndev) | 2496 | fec_enet_open(struct net_device *ndev) |
2201 | { | 2497 | { |
@@ -2225,7 +2521,8 @@ fec_enet_open(struct net_device *ndev) | |||
2225 | fec_restart(ndev); | 2521 | fec_restart(ndev); |
2226 | napi_enable(&fep->napi); | 2522 | napi_enable(&fep->napi); |
2227 | phy_start(fep->phy_dev); | 2523 | phy_start(fep->phy_dev); |
2228 | netif_start_queue(ndev); | 2524 | netif_tx_start_all_queues(ndev); |
2525 | |||
2229 | return 0; | 2526 | return 0; |
2230 | } | 2527 | } |
2231 | 2528 | ||
@@ -2399,7 +2696,7 @@ static int fec_set_features(struct net_device *netdev, | |||
2399 | /* Resume the device after updates */ | 2696 | /* Resume the device after updates */ |
2400 | if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { | 2697 | if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { |
2401 | fec_restart(netdev); | 2698 | fec_restart(netdev); |
2402 | netif_wake_queue(netdev); | 2699 | netif_tx_wake_all_queues(netdev); |
2403 | netif_tx_unlock_bh(netdev); | 2700 | netif_tx_unlock_bh(netdev); |
2404 | napi_enable(&fep->napi); | 2701 | napi_enable(&fep->napi); |
2405 | } | 2702 | } |
@@ -2407,10 +2704,17 @@ static int fec_set_features(struct net_device *netdev, | |||
2407 | return 0; | 2704 | return 0; |
2408 | } | 2705 | } |
2409 | 2706 | ||
2707 | u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, | ||
2708 | void *accel_priv, select_queue_fallback_t fallback) | ||
2709 | { | ||
2710 | return skb_tx_hash(ndev, skb); | ||
2711 | } | ||
2712 | |||
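fec_enet_select_queue() maps each flow to one TX ring via skb_tx_hash(), so all packets of a flow stay in order on a single queue. A toy user-space rendition of hash-then-modulo selection; the hash function here is an illustrative stand-in for the kernel's flow hash, not its implementation:

```c
#include <stdio.h>

static unsigned int toy_flow_hash(unsigned int saddr, unsigned int daddr,
				  unsigned short sport, unsigned short dport)
{
	/* Knuth multiplicative mix over the 4-tuple; purely illustrative */
	return (saddr ^ daddr ^ ((unsigned int)sport << 16 | dport))
		* 2654435761u;
}

int main(void)
{
	unsigned int num_tx_queues = 3;
	unsigned int h = toy_flow_hash(0x0a000001, 0x0a000002, 5000, 80);

	/* same 4-tuple always yields the same queue: no intra-flow reorder */
	printf("flow -> tx queue %u\n", h % num_tx_queues);
	return 0;
}
```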
2410 | static const struct net_device_ops fec_netdev_ops = { | 2713 | static const struct net_device_ops fec_netdev_ops = { |
2411 | .ndo_open = fec_enet_open, | 2714 | .ndo_open = fec_enet_open, |
2412 | .ndo_stop = fec_enet_close, | 2715 | .ndo_stop = fec_enet_close, |
2413 | .ndo_start_xmit = fec_enet_start_xmit, | 2716 | .ndo_start_xmit = fec_enet_start_xmit, |
2717 | .ndo_select_queue = fec_enet_select_queue, | ||
2414 | .ndo_set_rx_mode = set_multicast_list, | 2718 | .ndo_set_rx_mode = set_multicast_list, |
2415 | .ndo_change_mtu = eth_change_mtu, | 2719 | .ndo_change_mtu = eth_change_mtu, |
2416 | .ndo_validate_addr = eth_validate_addr, | 2720 | .ndo_validate_addr = eth_validate_addr, |
@@ -2432,39 +2736,38 @@ static int fec_enet_init(struct net_device *ndev) | |||
2432 | struct fec_enet_private *fep = netdev_priv(ndev); | 2736 | struct fec_enet_private *fep = netdev_priv(ndev); |
2433 | const struct platform_device_id *id_entry = | 2737 | const struct platform_device_id *id_entry = |
2434 | platform_get_device_id(fep->pdev); | 2738 | platform_get_device_id(fep->pdev); |
2739 | struct fec_enet_priv_tx_q *txq; | ||
2740 | struct fec_enet_priv_rx_q *rxq; | ||
2435 | struct bufdesc *cbd_base; | 2741 | struct bufdesc *cbd_base; |
2742 | dma_addr_t bd_dma; | ||
2436 | int bd_size; | 2743 | int bd_size; |
2744 | unsigned int i; | ||
2437 | 2745 | ||
2438 | /* init the tx & rx ring size */ | 2746 | #if defined(CONFIG_ARM) |
2439 | fep->tx_ring_size = TX_RING_SIZE; | 2747 | fep->rx_align = 0xf; |
2440 | fep->rx_ring_size = RX_RING_SIZE; | 2748 | fep->tx_align = 0xf; |
2749 | #else | ||
2750 | fep->rx_align = 0x3; | ||
2751 | fep->tx_align = 0x3; | ||
2752 | #endif | ||
2441 | 2753 | ||
2442 | fep->tx_stop_threshold = FEC_MAX_SKB_DESCS; | 2754 | fec_enet_alloc_queue(ndev); |
2443 | fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2; | ||
2444 | 2755 | ||
2445 | if (fep->bufdesc_ex) | 2756 | if (fep->bufdesc_ex) |
2446 | fep->bufdesc_size = sizeof(struct bufdesc_ex); | 2757 | fep->bufdesc_size = sizeof(struct bufdesc_ex); |
2447 | else | 2758 | else |
2448 | fep->bufdesc_size = sizeof(struct bufdesc); | 2759 | fep->bufdesc_size = sizeof(struct bufdesc); |
2449 | bd_size = (fep->tx_ring_size + fep->rx_ring_size) * | 2760 | bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * |
2450 | fep->bufdesc_size; | 2761 | fep->bufdesc_size; |
2451 | 2762 | ||
2452 | /* Allocate memory for buffer descriptors. */ | 2763 | /* Allocate memory for buffer descriptors. */ |
2453 | cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma, | 2764 | cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, |
2454 | GFP_KERNEL); | 2765 | GFP_KERNEL); |
2455 | if (!cbd_base) | 2766 | if (!cbd_base) { |
2456 | return -ENOMEM; | ||
2457 | |||
2458 | fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE, | ||
2459 | &fep->tso_hdrs_dma, GFP_KERNEL); | ||
2460 | if (!fep->tso_hdrs) { | ||
2461 | dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma); | ||
2462 | return -ENOMEM; | 2767 | return -ENOMEM; |
2463 | } | 2768 | } |
2464 | 2769 | ||
2465 | memset(cbd_base, 0, PAGE_SIZE); | 2770 | memset(cbd_base, 0, bd_size); |
2466 | |||
2467 | fep->netdev = ndev; | ||
2468 | 2771 | ||
2469 | /* Get the Ethernet address */ | 2772 | /* Get the Ethernet address */ |
2470 | fec_get_mac(ndev); | 2773 | fec_get_mac(ndev); |
@@ -2472,12 +2775,36 @@ static int fec_enet_init(struct net_device *ndev) | |||
2472 | fec_set_mac_address(ndev, NULL); | 2775 | fec_set_mac_address(ndev, NULL); |
2473 | 2776 | ||
2474 | /* Set receive and transmit descriptor base. */ | 2777 | /* Set receive and transmit descriptor base. */ |
2475 | fep->rx_bd_base = cbd_base; | 2778 | for (i = 0; i < fep->num_rx_queues; i++) { |
2476 | if (fep->bufdesc_ex) | 2779 | rxq = fep->rx_queue[i]; |
2477 | fep->tx_bd_base = (struct bufdesc *) | 2780 | rxq->index = i; |
2478 | (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); | 2781 | rxq->rx_bd_base = (struct bufdesc *)cbd_base; |
2479 | else | 2782 | rxq->bd_dma = bd_dma; |
2480 | fep->tx_bd_base = cbd_base + fep->rx_ring_size; | 2783 | if (fep->bufdesc_ex) { |
2784 | bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; | ||
2785 | cbd_base = (struct bufdesc *) | ||
2786 | (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); | ||
2787 | } else { | ||
2788 | bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; | ||
2789 | cbd_base += rxq->rx_ring_size; | ||
2790 | } | ||
2791 | } | ||
2792 | |||
2793 | for (i = 0; i < fep->num_tx_queues; i++) { | ||
2794 | txq = fep->tx_queue[i]; | ||
2795 | txq->index = i; | ||
2796 | txq->tx_bd_base = (struct bufdesc *)cbd_base; | ||
2797 | txq->bd_dma = bd_dma; | ||
2798 | if (fep->bufdesc_ex) { | ||
2799 | bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; | ||
2800 | cbd_base = (struct bufdesc *) | ||
2801 | (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); | ||
2802 | } else { | ||
2803 | bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; | ||
2804 | cbd_base += txq->tx_ring_size; | ||
2805 | } | ||
2806 | } | ||
2807 | |||
2481 | 2808 | ||
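The two loops above slice a single dma_alloc_coherent() block into back-to-back per-queue descriptor rings by advancing the CPU pointer and the DMA handle in lock-step. A stand-alone sketch of that cursor walk, with made-up descriptor sizes and base addresses:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int desc_size = 32;	/* assumed sizeof(struct bufdesc_ex) */
	unsigned int ring_size[3] = { 128, 128, 128 };	/* assumed per queue */
	uintptr_t cpu = 0x80000000;	/* assumed kernel virtual base */
	uint64_t dma = 0x40000000;	/* assumed bus address base */

	for (int q = 0; q < 3; q++) {
		printf("queue %d: cpu=%#lx dma=%#llx\n", q,
		       (unsigned long)cpu, (unsigned long long)dma);
		cpu += (uintptr_t)desc_size * ring_size[q];	/* CPU cursor */
		dma += (uint64_t)desc_size * ring_size[q];	/* DMA cursor */
	}
	return 0;
}
```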
2482 | /* The FEC Ethernet specific entries in the device structure */ | 2809 | /* The FEC Ethernet specific entries in the device structure */ |
2483 | ndev->watchdog_timeo = TX_TIMEOUT; | 2810 | ndev->watchdog_timeo = TX_TIMEOUT; |
@@ -2500,6 +2827,11 @@ static int fec_enet_init(struct net_device *ndev) | |||
2500 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; | 2827 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; |
2501 | } | 2828 | } |
2502 | 2829 | ||
2830 | if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { | ||
2831 | fep->tx_align = 0; | ||
2832 | fep->rx_align = 0x3f; | ||
2833 | } | ||
2834 | |||
2503 | ndev->hw_features = ndev->features; | 2835 | ndev->hw_features = ndev->features; |
2504 | 2836 | ||
2505 | fec_restart(ndev); | 2837 | fec_restart(ndev); |
@@ -2545,6 +2877,42 @@ static void fec_reset_phy(struct platform_device *pdev) | |||
2545 | } | 2877 | } |
2546 | #endif /* CONFIG_OF */ | 2878 | #endif /* CONFIG_OF */ |
2547 | 2879 | ||
2880 | static void | ||
2881 | fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) | ||
2882 | { | ||
2883 | struct device_node *np = pdev->dev.of_node; | ||
2884 | int err; | ||
2885 | |||
2886 | *num_tx = *num_rx = 1; | ||
2887 | |||
2888 | if (!np || !of_device_is_available(np)) | ||
2889 | return; | ||
2890 | |||
2891 | /* parse the num of tx and rx queues */ | ||
2892 | err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); | ||
2893 | err |= of_property_read_u32(np, "fsl,num-rx-queues", num_rx); | ||
2894 | if (err) { | ||
2895 | *num_tx = 1; | ||
2896 | *num_rx = 1; | ||
2897 | return; | ||
2898 | } | ||
2899 | |||
2900 | if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { | ||
2901 | dev_err(&pdev->dev, "Invalid num_tx(=%d), falling back to 1\n", | ||
2902 | *num_tx); | ||
2903 | *num_tx = 1; | ||
2904 | return; | ||
2905 | } | ||
2906 | |||
2907 | if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { | ||
2908 | dev_err(&pdev->dev, "Invalid num_rx(=%d), falling back to 1\n", | ||
2909 | *num_rx); | ||
2910 | *num_rx = 1; | ||
2911 | return; | ||
2912 | } | ||
2913 | |||
2914 | } | ||
2915 | |||
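fec_enet_get_queue_num() relies on of_property_read_u32() returning non-zero and leaving its output untouched when a property is absent, which is why seeding the defaults first keeps the error paths trivial. A kernel-style sketch of that pattern; the helper name is hypothetical:

```c
#include <linux/of.h>

static void read_queue_count(struct device_node *np, const char *prop,
			     u32 *out, u32 max)
{
	*out = 1;				/* default: one queue */

	/* absent property, read failure, or out-of-range value: keep 1 */
	if (of_property_read_u32(np, prop, out) || *out < 1 || *out > max)
		*out = 1;
}
```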
2548 | static int | 2916 | static int |
2549 | fec_probe(struct platform_device *pdev) | 2917 | fec_probe(struct platform_device *pdev) |
2550 | { | 2918 | { |
@@ -2556,13 +2924,18 @@ fec_probe(struct platform_device *pdev) | |||
2556 | const struct of_device_id *of_id; | 2924 | const struct of_device_id *of_id; |
2557 | static int dev_id; | 2925 | static int dev_id; |
2558 | struct device_node *np = pdev->dev.of_node, *phy_node; | 2926 | struct device_node *np = pdev->dev.of_node, *phy_node; |
2927 | int num_tx_qs = 1; | ||
2928 | int num_rx_qs = 1; | ||
2559 | 2929 | ||
2560 | of_id = of_match_device(fec_dt_ids, &pdev->dev); | 2930 | of_id = of_match_device(fec_dt_ids, &pdev->dev); |
2561 | if (of_id) | 2931 | if (of_id) |
2562 | pdev->id_entry = of_id->data; | 2932 | pdev->id_entry = of_id->data; |
2563 | 2933 | ||
2934 | fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); | ||
2935 | |||
2564 | /* Init network device */ | 2936 | /* Init network device */ |
2565 | ndev = alloc_etherdev(sizeof(struct fec_enet_private)); | 2937 | ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), |
2938 | num_tx_qs, num_rx_qs); | ||
2566 | if (!ndev) | 2939 | if (!ndev) |
2567 | return -ENOMEM; | 2940 | return -ENOMEM; |
2568 | 2941 | ||
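alloc_etherdev_mqs() must be told the queue counts at allocation time, since the per-queue netdev structures are sized then; the counts actually used can only be lowered afterwards. A kernel-style sketch of the general multiqueue allocation pattern, not this driver's exact probe path; demo_alloc() and the priv size are illustrative:

```c
#include <linux/etherdevice.h>

static struct net_device *demo_alloc(unsigned int txqs, unsigned int rxqs)
{
	struct net_device *ndev;

	/* sizes the netdev for txqs TX and rxqs RX queues up front */
	ndev = alloc_etherdev_mqs(1024 /* assumed priv size */, txqs, rxqs);
	if (!ndev)
		return NULL;

	/* optionally shrink the count actually exposed to the stack */
	if (netif_set_real_num_tx_queues(ndev, txqs)) {
		free_netdev(ndev);
		return NULL;
	}
	return ndev;
}
```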
@@ -2571,6 +2944,9 @@ fec_probe(struct platform_device *pdev) | |||
2571 | /* setup board info structure */ | 2944 | /* setup board info structure */ |
2572 | fep = netdev_priv(ndev); | 2945 | fep = netdev_priv(ndev); |
2573 | 2946 | ||
2947 | fep->num_rx_queues = num_rx_qs; | ||
2948 | fep->num_tx_queues = num_tx_qs; | ||
2949 | |||
2574 | #if !defined(CONFIG_M5272) | 2950 | #if !defined(CONFIG_M5272) |
2575 | /* default enable pause frame auto negotiation */ | 2951 | /* default enable pause frame auto negotiation */ |
2576 | if (pdev->id_entry && | 2952 | if (pdev->id_entry && |
@@ -2637,6 +3013,12 @@ fec_probe(struct platform_device *pdev) | |||
2637 | 3013 | ||
2638 | fep->ptp_clk_on = false; | 3014 | fep->ptp_clk_on = false; |
2639 | mutex_init(&fep->ptp_clk_mutex); | 3015 | mutex_init(&fep->ptp_clk_mutex); |
3016 | |||
3017 | /* clk_ref is optional, depends on board */ | ||
3018 | fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); | ||
3019 | if (IS_ERR(fep->clk_ref)) | ||
3020 | fep->clk_ref = NULL; | ||
3021 | |||
2640 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); | 3022 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); |
2641 | fep->bufdesc_ex = | 3023 | fep->bufdesc_ex = |
2642 | pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; | 3024 | pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; |
@@ -2684,6 +3066,7 @@ fec_probe(struct platform_device *pdev) | |||
2684 | goto failed_irq; | 3066 | goto failed_irq; |
2685 | } | 3067 | } |
2686 | 3068 | ||
3069 | init_completion(&fep->mdio_done); | ||
2687 | ret = fec_enet_mii_init(pdev); | 3070 | ret = fec_enet_mii_init(pdev); |
2688 | if (ret) | 3071 | if (ret) |
2689 | goto failed_mii_init; | 3072 | goto failed_mii_init; |