author		Giuseppe CAVALLARO <peppe.cavallaro@st.com>	2013-03-26 00:43:05 -0400
committer	David S. Miller <davem@davemloft.net>	2013-03-26 12:53:36 -0400
commit		4a7d666a7202744af32d4da31fb52857b7d86850 (patch)
tree		37aaa3fc4b8aba60ff1d9557a9662e7d30a96c4d
parent		ad999eee669d6a0439f5b9734e87eed50e776e32 (diff)
stmmac: reorganize chain/ring modes removing Koptions
Previously we had two Kconfig options to decide whether the stmmac should use
a ring or a chain to manage its descriptors. This patch removes those kernel
configuration options and instead allows chain mode to be selected by passing
a module option; ring mode remains the default.

With this patch it is also easier to validate the driver build and to
guarantee that both modes always compile fine.

Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
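In short, the mode is now chosen at run time rather than at build time. A condensed sketch of the mechanism, pulled together from the hunks below (the surrounding probe code and error handling are omitted):

	static unsigned int chain_mode;
	module_param(chain_mode, int, S_IRUGO);
	MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

	/* In stmmac_hw_init(): latch the requested mode into priv->mode and
	 * hook up the matching helper ops; every descriptor helper then
	 * receives the mode and picks the ring or chain variant. */
	if (chain_mode) {
		priv->hw->chain = &chain_mode_ops;
		priv->mode = STMMAC_CHAIN_MODE;
	} else {
		priv->hw->ring = &ring_mode_ops;
		priv->mode = STMMAC_RING_MODE;
	}

For a modular build this means, for example, "modprobe stmmac chain_mode=1"; for a built-in driver the same value can be passed through the new chain_mode: keyword handled in stmmac_cmdline_opt().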
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig        |  18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile       |   6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c   |  36
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h       |  25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h    |  44
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c     |  29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c    |  29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c    |  24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h       |   1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  |  94
10 files changed, 169 insertions(+), 137 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index c0ea838c78d1..f0720d0d5771 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -54,22 +54,4 @@ config STMMAC_DA
 	  By default, the DMA arbitration scheme is based on Round-robin
 	  (rx:tx priority is 1:1).
 
-choice
-	prompt "Select the DMA TX/RX descriptor operating modes"
-	depends on STMMAC_ETH
-	---help---
-	  This driver supports DMA descriptor to operate both in dual buffer
-	  (RING) and linked-list(CHAINED) mode. In RING mode each descriptor
-	  points to two data buffer pointers whereas in CHAINED mode they
-	  points to only one data buffer pointer.
-
-config STMMAC_RING
-	bool "Enable Descriptor Ring Mode"
-
-config STMMAC_CHAINED
-	bool "Enable Descriptor Chained Mode"
-
-endchoice
-
-
 endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c8e8ea60ac19..ae995a367c91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,9 +1,7 @@
 obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
-stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
-	      dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+	      chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
 	      mmc_core.o $(stmmac-y)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 0668659803ed..08ff51e9c791 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
 	struct stmmac_priv *priv = (struct stmmac_priv *) p;
 	unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,7 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 	desc->des2 = dma_map_single(priv->device, skb->data,
 				    bmax, DMA_TO_DEVICE);
-	priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
+	priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
 
 	while (len != 0) {
 		entry = (++priv->cur_tx) % txsize;
@@ -57,8 +57,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			desc->des2 = dma_map_single(priv->device,
 						    (skb->data + bmax * i),
 						    bmax, DMA_TO_DEVICE);
-			priv->hw->desc->prepare_tx_desc(desc, 0, bmax,
-							csum);
+			priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
+							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
 			priv->tx_skbuff[entry] = NULL;
 			len -= bmax;
@@ -67,8 +67,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			desc->des2 = dma_map_single(priv->device,
 						    (skb->data + bmax * i), len,
 						    DMA_TO_DEVICE);
-			priv->hw->desc->prepare_tx_desc(desc, 0, len,
-							csum);
+			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
 			priv->tx_skbuff[entry] = NULL;
 			len = 0;
@@ -89,18 +89,6 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
 	return ret;
 }
 
-static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
-{
-}
-
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
-{
-}
-
-static void stmmac_clean_desc3(struct dma_desc *p)
-{
-}
-
 static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
 				  unsigned int size)
 {
@@ -120,18 +108,8 @@ static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
 	p->des3 = (unsigned int)phy_addr;
 }
 
-static int stmmac_set_16kib_bfsize(int mtu)
-{
-	/* Not supported */
-	return 0;
-}
-
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_chain_mode_ops chain_mode_ops = {
 	.is_jumbo_frm = stmmac_is_jumbo_frm,
 	.jumbo_frm = stmmac_jumbo_frm,
-	.refill_desc3 = stmmac_refill_desc3,
-	.init_desc3 = stmmac_init_desc3,
 	.init_dma_chain = stmmac_init_dma_chain,
-	.clean_desc3 = stmmac_clean_desc3,
-	.set_16kib_bfsize = stmmac_set_16kib_bfsize,
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 186d14806122..a29553211dee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -255,23 +255,27 @@ struct dma_features {
 #define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
 #define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
 
+#define STMMAC_CHAIN_MODE 0x1
+#define STMMAC_RING_MODE 0x2
+
 struct stmmac_desc_ops {
 	/* DMA RX descriptor ring initialization */
 	void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
-			      int disable_rx_ic);
+			      int disable_rx_ic, int mode);
 	/* DMA TX descriptor ring initialization */
-	void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+	void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size,
+			      int mode);
 
 	/* Invoked by the xmit function to prepare the tx descriptor */
 	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
-				 int csum_flag);
+				 int csum_flag, int mode);
 	/* Set/get the owner of the descriptor */
 	void (*set_tx_owner) (struct dma_desc *p);
 	int (*get_tx_owner) (struct dma_desc *p);
 	/* Invoked by the xmit function to close the tx descriptor */
 	void (*close_tx_desc) (struct dma_desc *p);
 	/* Clean the tx descriptor as soon as the tx irq is received */
-	void (*release_tx_desc) (struct dma_desc *p);
+	void (*release_tx_desc) (struct dma_desc *p, int mode);
 	/* Clear interrupt on tx frame completion. When this bit is
 	 * set an interrupt happens as soon as the frame is transmitted */
 	void (*clear_tx_ic) (struct dma_desc *p);
@@ -361,18 +365,24 @@ struct stmmac_ring_mode_ops {
 	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
 	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
 	void (*refill_desc3) (int bfsize, struct dma_desc *p);
-	void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p);
-	void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
-				unsigned int size);
+	void (*init_desc3) (struct dma_desc *p);
 	void (*clean_desc3) (struct dma_desc *p);
 	int (*set_16kib_bfsize) (int mtu);
 };
 
+struct stmmac_chain_mode_ops {
+	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+	void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
+				unsigned int size);
+};
+
 struct mac_device_info {
 	const struct stmmac_ops *mac;
 	const struct stmmac_desc_ops *desc;
 	const struct stmmac_dma_ops *dma;
 	const struct stmmac_ring_mode_ops *ring;
+	const struct stmmac_chain_mode_ops *chain;
 	struct mii_regs mii; /* MII register Addresses */
 	struct mac_link link;
 	unsigned int synopsys_uid;
@@ -390,5 +400,6 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+extern const struct stmmac_chain_mode_ops chain_mode_ops;
 
 #endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 7ee9499a6e38..20f83fc9cf13 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -30,26 +30,28 @@
 #ifndef __DESC_COM_H__
 #define __DESC_COM_H__
 
-#if defined(CONFIG_STMMAC_RING)
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Specific functions used for Ring mode */
+
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
 {
 	p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
 	if (end)
 		p->des01.erx.end_ring = 1;
 }
 
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
 {
 	if (end)
 		p->des01.etx.end_ring = 1;
 }
 
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
 {
 	p->des01.etx.end_ring = ter;
 }
 
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 {
 	if (unlikely(len > BUF_SIZE_4KiB)) {
 		p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
@@ -58,25 +60,26 @@ static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
 		p->des01.etx.buffer1_size = len;
 }
 
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
 {
 	p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
 	if (end)
 		p->des01.rx.end_ring = 1;
 }
 
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
 {
 	if (end)
 		p->des01.tx.end_ring = 1;
 }
 
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
 {
 	p->des01.tx.end_ring = ter;
 }
 
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 {
 	if (unlikely(len > BUF_SIZE_2KiB)) {
 		p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
@@ -85,47 +88,48 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
 		p->des01.tx.buffer1_size = len;
 }
 
-#else
+/* Specific functions used for Chain mode */
 
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
 {
 	p->des01.erx.second_address_chained = 1;
 }
 
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
 {
 	p->des01.etx.second_address_chained = 1;
 }
 
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
 {
 	p->des01.etx.second_address_chained = 1;
 }
 
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
 {
 	p->des01.etx.buffer1_size = len;
 }
 
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
 {
 	p->des01.rx.second_address_chained = 1;
 }
 
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size)
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int
+					 ring_size)
 {
 	p->des01.tx.second_address_chained = 1;
 }
 
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
 {
 	p->des01.tx.second_address_chained = 1;
 }
 
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
 {
 	p->des01.tx.buffer1_size = len;
 }
-#endif
-
 #endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2fc8ef95f97a..62f9f4e100fd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -229,14 +229,17 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
-				  int disable_rx_ic)
+				  int disable_rx_ic, int mode)
 {
 	int i;
 	for (i = 0; i < ring_size; i++) {
 		p->des01.erx.own = 1;
 		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
 
-		ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+		if (mode == STMMAC_CHAIN_MODE)
+			ehn_desc_rx_set_on_chain(p, (i == ring_size - 1));
+		else
+			ehn_desc_rx_set_on_ring(p, (i == ring_size - 1));
 
 		if (disable_rx_ic)
 			p->des01.erx.disable_ic = 1;
@@ -244,13 +247,17 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
 	}
 }
 
-static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size,
+				  int mode)
 {
 	int i;
 
 	for (i = 0; i < ring_size; i++) {
 		p->des01.etx.own = 0;
-		ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
+		if (mode == STMMAC_CHAIN_MODE)
+			ehn_desc_tx_set_on_chain(p, (i == ring_size - 1));
+		else
+			ehn_desc_tx_set_on_ring(p, (i == ring_size - 1));
 		p++;
 	}
 }
@@ -280,20 +287,26 @@ static int enh_desc_get_tx_ls(struct dma_desc *p)
 	return p->des01.etx.last_segment;
 }
 
-static void enh_desc_release_tx_desc(struct dma_desc *p)
+static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
 {
 	int ter = p->des01.etx.end_ring;
 
 	memset(p, 0, offsetof(struct dma_desc, des2));
-	enh_desc_end_tx_desc(p, ter);
+	if (mode == STMMAC_CHAIN_MODE)
+		enh_desc_end_tx_desc_on_chain(p, ter);
+	else
+		enh_desc_end_tx_desc_on_ring(p, ter);
 }
 
 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-				     int csum_flag)
+				     int csum_flag, int mode)
 {
 	p->des01.etx.first_segment = is_fs;
 
-	enh_set_tx_desc_len(p, len);
+	if (mode == STMMAC_CHAIN_MODE)
+		enh_set_tx_desc_len_on_chain(p, len);
+	else
+		enh_set_tx_desc_len_on_ring(p, len);
 
 	if (likely(csum_flag))
 		p->des01.etx.checksum_insertion = cic_full;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 68962c549a2d..88df0b48e35b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -123,14 +123,17 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
-			       int disable_rx_ic)
+			       int disable_rx_ic, int mode)
 {
 	int i;
 	for (i = 0; i < ring_size; i++) {
 		p->des01.rx.own = 1;
 		p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
 
-		ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+		if (mode == STMMAC_CHAIN_MODE)
+			ndesc_rx_set_on_chain(p, (i == ring_size - 1));
+		else
+			ndesc_rx_set_on_ring(p, (i == ring_size - 1));
 
 		if (disable_rx_ic)
 			p->des01.rx.disable_ic = 1;
@@ -138,12 +141,16 @@ static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
 	}
 }
 
-static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size,
+			       int mode)
 {
 	int i;
 	for (i = 0; i < ring_size; i++) {
 		p->des01.tx.own = 0;
-		ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1)));
+		if (mode == STMMAC_CHAIN_MODE)
+			ndesc_tx_set_on_chain(p, (i == (ring_size - 1)));
+		else
+			ndesc_tx_set_on_ring(p, (i == (ring_size - 1)));
 		p++;
 	}
 }
@@ -173,19 +180,25 @@ static int ndesc_get_tx_ls(struct dma_desc *p)
 	return p->des01.tx.last_segment;
 }
 
-static void ndesc_release_tx_desc(struct dma_desc *p)
+static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
 {
 	int ter = p->des01.tx.end_ring;
 
 	memset(p, 0, offsetof(struct dma_desc, des2));
-	ndesc_end_tx_desc(p, ter);
+	if (mode == STMMAC_CHAIN_MODE)
+		ndesc_end_tx_desc_on_chain(p, ter);
+	else
+		ndesc_end_tx_desc_on_ring(p, ter);
 }
 
 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-				  int csum_flag)
+				  int csum_flag, int mode)
 {
 	p->des01.tx.first_segment = is_fs;
-	norm_set_tx_desc_len(p, len);
+	if (mode == STMMAC_CHAIN_MODE)
+		norm_set_tx_desc_len_on_chain(p, len);
+	else
+		norm_set_tx_desc_len_on_ring(p, len);
 
 	if (likely(csum_flag))
 		p->des01.tx.checksum_insertion = cic_full;
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 4b785e10f2ed..8a5e661f4fda 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -49,8 +49,8 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    bmax, DMA_TO_DEVICE);
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-		priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
-						csum);
+		priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
+						STMMAC_RING_MODE);
 		wmb();
 		entry = (++priv->cur_tx) % txsize;
 		desc = priv->dma_tx + entry;
@@ -58,7 +58,8 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		desc->des2 = dma_map_single(priv->device, skb->data + bmax,
 					    len, DMA_TO_DEVICE);
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+						STMMAC_RING_MODE);
 		wmb();
 		priv->hw->desc->set_tx_owner(desc);
 		priv->tx_skbuff[entry] = NULL;
@@ -66,7 +67,8 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum);
+		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
+						STMMAC_RING_MODE);
 	}
 
 	return entry;
@@ -89,17 +91,10 @@ static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
 		p->des3 = p->des2 + BUF_SIZE_8KiB;
 }
 
-/* In ring mode we need to fill the desc3 because it is used
- * as buffer */
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
-{
-	if (unlikely(des3_as_data_buf))
-		p->des3 = p->des2 + BUF_SIZE_8KiB;
-}
-
-static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
-				  unsigned int size)
+/* In ring mode we need to fill the desc3 because it is used as buffer */
+static void stmmac_init_desc3(struct dma_desc *p)
 {
+	p->des3 = p->des2 + BUF_SIZE_8KiB;
 }
 
 static void stmmac_clean_desc3(struct dma_desc *p)
@@ -121,7 +116,6 @@ const struct stmmac_ring_mode_ops ring_mode_ops = {
 	.jumbo_frm = stmmac_jumbo_frm,
 	.refill_desc3 = stmmac_refill_desc3,
 	.init_desc3 = stmmac_init_desc3,
-	.init_dma_chain = stmmac_init_dma_chain,
 	.clean_desc3 = stmmac_clean_desc3,
 	.set_16kib_bfsize = stmmac_set_16kib_bfsize,
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b05df8983be5..e5f2f333616b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -93,6 +93,7 @@ struct stmmac_priv {
 	u32 tx_coal_timer;
 	int use_riwt;
 	u32 rx_riwt;
+	unsigned int mode;
 };
 
 extern int phyaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d02b446037d7..bbee6b32ed63 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -130,6 +130,13 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 #define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
 
+/* By default the driver will use the ring mode to manage tx and rx descriptors
+ * but passing this value so user can force to use the chain instead of the ring
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, S_IRUGO);
+MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_STMMAC_DEBUG_FS
@@ -514,17 +521,15 @@ static void init_dma_desc_rings(struct net_device *dev)
 	struct sk_buff *skb;
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int rxsize = priv->dma_rx_size;
-	unsigned int bfsize;
+	unsigned int bfsize = 0;
 	int dis_ic = 0;
-	int des3_as_data_buf = 0;
 
 	/* Set the max buffer size according to the DESC mode
 	 * and the MTU. Note that RING mode allows 16KiB bsize. */
-	bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+	if (priv->mode == STMMAC_RING_MODE)
+		bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
 
-	if (bfsize == BUF_SIZE_16KiB)
-		des3_as_data_buf = 1;
-	else
+	if (bfsize < BUF_SIZE_16KiB)
 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
 
 	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
@@ -571,7 +576,9 @@ static void init_dma_desc_rings(struct net_device *dev)
 
 		p->des2 = priv->rx_skbuff_dma[i];
 
-		priv->hw->ring->init_desc3(des3_as_data_buf, p);
+		if ((priv->mode == STMMAC_RING_MODE) &&
+		    (bfsize == BUF_SIZE_16KiB))
+			priv->hw->ring->init_desc3(p);
 
 		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
 		    priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
@@ -589,17 +596,20 @@ static void init_dma_desc_rings(struct net_device *dev)
 
 	/* In case of Chained mode this sets the des3 to the next
 	 * element in the chain */
-	priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
-	priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
-
+	if (priv->mode == STMMAC_CHAIN_MODE) {
+		priv->hw->chain->init_dma_chain(priv->dma_rx, priv->dma_rx_phy,
+						rxsize);
+		priv->hw->chain->init_dma_chain(priv->dma_tx, priv->dma_tx_phy,
+						txsize);
+	}
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
 
 	if (priv->use_riwt)
 		dis_ic = 1;
 	/* Clear the Rx/Tx descriptors */
-	priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
-	priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
+	priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic, priv->mode);
+	priv->hw->desc->init_tx_desc(priv->dma_tx, txsize, priv->mode);
 
 	if (netif_msg_hw(priv)) {
 		pr_info("RX descriptor ring:\n");
@@ -726,14 +736,15 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 			dma_unmap_single(priv->device, p->des2,
 					 priv->hw->desc->get_tx_len(p),
 					 DMA_TO_DEVICE);
-		priv->hw->ring->clean_desc3(p);
+		if (priv->mode == STMMAC_RING_MODE)
+			priv->hw->ring->clean_desc3(p);
 
 		if (likely(skb != NULL)) {
 			dev_kfree_skb(skb);
 			priv->tx_skbuff[entry] = NULL;
 		}
 
-		priv->hw->desc->release_tx_desc(p);
+		priv->hw->desc->release_tx_desc(p, priv->mode);
 
 		priv->dirty_tx++;
 	}
@@ -778,7 +789,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 
 	priv->hw->dma->stop_tx(priv->ioaddr);
 	dma_free_tx_skbufs(priv);
-	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size,
+				     priv->mode);
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
 	priv->hw->dma->start_tx(priv->ioaddr);
@@ -1190,7 +1202,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct stmmac_priv *priv = netdev_priv(dev);
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int entry;
-	int i, csum_insertion = 0;
+	int i, csum_insertion = 0, is_jumbo = 0;
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	struct dma_desc *desc, *first;
 	unsigned int nopaged_len = skb_headlen(skb);
@@ -1236,15 +1248,27 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 	priv->tx_skbuff[entry] = skb;
 
-	if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
-		entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
-		desc = priv->dma_tx + entry;
+	/* To program the descriptors according to the size of the frame */
+	if (priv->mode == STMMAC_RING_MODE) {
+		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
+							priv->plat->enh_desc);
+		if (unlikely(is_jumbo))
+			entry = priv->hw->ring->jumbo_frm(priv, skb,
+							  csum_insertion);
 	} else {
+		is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
+							 priv->plat->enh_desc);
+		if (unlikely(is_jumbo))
+			entry = priv->hw->chain->jumbo_frm(priv, skb,
+							   csum_insertion);
+	}
+	if (likely(!is_jumbo)) {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
-						csum_insertion);
-	}
+						csum_insertion, priv->mode);
+	} else
+		desc = priv->dma_tx + entry;
 
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1257,7 +1281,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
 					      DMA_TO_DEVICE);
 		priv->tx_skbuff[entry] = NULL;
-		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
+						priv->mode);
 		wmb();
 		priv->hw->desc->set_tx_owner(desc);
 		wmb();
@@ -1338,7 +1363,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 
 			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
 
-			if (unlikely(priv->plat->has_gmac))
+			if (unlikely((priv->mode == STMMAC_RING_MODE) &&
+				     (priv->plat->has_gmac)))
 				priv->hw->ring->refill_desc3(bfsize, p + entry);
 
 			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
@@ -1884,12 +1910,20 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 
 	priv->hw = mac;
 
-	/* To use the chained or ring mode */
-	priv->hw->ring = &ring_mode_ops;
-
 	/* Get and dump the chip ID */
 	priv->synopsys_id = stmmac_get_synopsys_id(priv);
 
+	/* To use the chained or ring mode */
+	if (chain_mode) {
+		priv->hw->chain = &chain_mode_ops;
+		pr_info(" Chain mode enabled\n");
+		priv->mode = STMMAC_CHAIN_MODE;
+	} else {
+		priv->hw->ring = &ring_mode_ops;
+		pr_info(" Ring mode enabled\n");
+		priv->mode = STMMAC_RING_MODE;
+	}
+
 	/* Get the HW capability (new GMAC newer than 3.50a) */
 	priv->hw_cap_support = stmmac_get_hw_features(priv);
 	if (priv->hw_cap_support) {
@@ -2109,8 +2143,9 @@ int stmmac_suspend(struct net_device *ndev)
 	priv->hw->dma->stop_rx(priv->ioaddr);
 	/* Clear the Rx/Tx descriptors */
 	priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
-				     dis_ic);
-	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+				     dis_ic, priv->mode);
+	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size,
+				     priv->mode);
 
 	/* Enable Power down mode by programming the PMT regs */
 	if (device_may_wakeup(priv->device))
@@ -2249,6 +2284,9 @@ static int __init stmmac_cmdline_opt(char *str)
 		} else if (!strncmp(opt, "eee_timer:", 10)) {
 			if (kstrtoint(opt + 10, 0, &eee_timer))
 				goto err;
+		} else if (!strncmp(opt, "chain_mode:", 11)) {
+			if (kstrtoint(opt + 11, 0, &chain_mode))
+				goto err;
 		}
 	}
 	return 0;