author	Giuseppe CAVALLARO <peppe.cavallaro@st.com>	2013-07-02 08:12:36 -0400
committer	David S. Miller <davem@davemloft.net>	2013-07-02 19:04:53 -0400
commit	83d7af64ac9eaf4f4db7228677bc25f23c383790 (patch)
tree	611351e9880b35ea5cfa136264cb106b07873bf9
parent	06a23fe31ca3992863721f21bdb0307af93da807 (diff)
stmmac: tidy-up and rework the driver debug levels
Prior to this patch, the internal debugging was based on #ifdef blocks, and some printks were useless because much of the information they printed is already exposed via ethtool. This patch removes all the ifdef defines; the driver now only uses the netif_msg_XXX message levels.

Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
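The netif_msg_XXX helpers gate output at run time on a per-device msg_enable bitmap, instead of the compile-time #ifdef switches this patch deletes. Below is a minimal sketch of the pattern, not code from the driver itself: the struct is a stand-in for the stmmac private data, and the only thing the netif_msg_* macros from linux/netdevice.h require is a msg_enable member.

#include <linux/netdevice.h>

/* Stand-in for the driver private data (sketch only); the netif_msg_*
 * helpers just test NETIF_MSG_* bits in 'msg_enable'.
 */
struct sketch_priv {
	u32 msg_enable;
};

static void sketch_show_ring_sizes(struct sketch_priv *priv,
				   int txsize, int rxsize)
{
	/* netif_msg_probe(priv) expands to
	 * (priv->msg_enable & NETIF_MSG_PROBE), so emitting the message
	 * is a run-time decision, unlike the removed CHIP_DBG/DBG/
	 * RX_DBG/TX_DBG macros that were compiled away entirely.
	 */
	if (netif_msg_probe(priv))
		pr_debug("%s: txsize %d, rxsize %d\n", __func__,
			 txsize, rxsize);
}

The bitmap is typically seeded at probe time with netif_msg_init(debug, default_bits), which is how a driver's existing "debug" module parameter keeps working after a conversion like this one.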
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/common.h	10
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c	57
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c	10
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c	4
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c	8
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c	72
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/enh_desc.c	95
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/norm_desc.c	4
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	124
9 files changed, 117 insertions(+), 267 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7788fbe44f0a..9911b9323f00 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -38,16 +38,6 @@
 #include "descs.h"
 #include "mmc.h"
 
-#undef CHIP_DEBUG_PRINT
-/* Turn-on extra printk debug for MAC core, dma and descriptors */
-/* #define CHIP_DEBUG_PRINT */
-
-#ifdef CHIP_DEBUG_PRINT
-#define CHIP_DBG(fmt, args...)	printk(fmt, ## args)
-#else
-#define CHIP_DBG(fmt, args...)	do { } while (0)
-#endif
-
 /* Synopsys Core versions */
 #define DWMAC_CORE_3_40	0x34
 #define DWMAC_CORE_3_50	0x35
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 7e05e8d0f1c2..cdd926832e27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -91,8 +91,8 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
 	unsigned int value = 0;
 	unsigned int perfect_addr_number;
 
-	CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
-		 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+	pr_debug("%s: # mcasts %d, # unicast %d\n", __func__,
+		 netdev_mc_count(dev), netdev_uc_count(dev));
 
 	if (dev->flags & IFF_PROMISC)
 		value = GMAC_FRAME_FILTER_PR;
@@ -152,7 +152,7 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
 #endif
 	writel(value, ioaddr + GMAC_FRAME_FILTER);
 
-	CHIP_DBG(KERN_INFO "\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
+	pr_debug("\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
 		 readl(ioaddr + GMAC_FRAME_FILTER),
 		 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
 }
@@ -162,18 +162,18 @@ static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
 {
 	unsigned int flow = 0;
 
-	CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+	pr_debug("GMAC Flow-Control:\n");
 	if (fc & FLOW_RX) {
-		CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+		pr_debug("\tReceive Flow-Control ON\n");
 		flow |= GMAC_FLOW_CTRL_RFE;
 	}
 	if (fc & FLOW_TX) {
-		CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+		pr_debug("\tTransmit Flow-Control ON\n");
 		flow |= GMAC_FLOW_CTRL_TFE;
 	}
 
 	if (duplex) {
-		CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
+		pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
 		flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
 	}
 
@@ -185,11 +185,11 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
 	unsigned int pmt = 0;
 
 	if (mode & WAKE_MAGIC) {
-		CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+		pr_debug("GMAC: WOL Magic frame\n");
 		pmt |= power_down | magic_pkt_en;
 	}
 	if (mode & WAKE_UCAST) {
-		CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+		pr_debug("GMAC: WOL on global unicast\n");
 		pmt |= global_unicast;
 	}
 
@@ -203,23 +203,13 @@ static int dwmac1000_irq_status(void __iomem *ioaddr,
 	int ret = 0;
 
 	/* Not used events (e.g. MMC interrupts) are not handled. */
-	if ((intr_status & mmc_tx_irq)) {
-		CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
-			 readl(ioaddr + GMAC_MMC_TX_INTR));
+	if ((intr_status & mmc_tx_irq))
 		x->mmc_tx_irq_n++;
-	}
-	if (unlikely(intr_status & mmc_rx_irq)) {
-		CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
-			 readl(ioaddr + GMAC_MMC_RX_INTR));
+	if (unlikely(intr_status & mmc_rx_irq))
 		x->mmc_rx_irq_n++;
-	}
-	if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
-		CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
-			 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
 		x->mmc_rx_csum_offload_irq_n++;
-	}
 	if (unlikely(intr_status & pmt_irq)) {
-		CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
 		/* clear the PMT bits 5 and 6 by reading the PMT status reg */
 		readl(ioaddr + GMAC_PMT);
 		x->irq_receive_pmt_irq_n++;
@@ -229,32 +219,22 @@ static int dwmac1000_irq_status(void __iomem *ioaddr,
 		/* Clean LPI interrupt by reading the Reg 12 */
 		ret = readl(ioaddr + LPI_CTRL_STATUS);
 
-		if (ret & LPI_CTRL_STATUS_TLPIEN) {
-			CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
+		if (ret & LPI_CTRL_STATUS_TLPIEN)
 			x->irq_tx_path_in_lpi_mode_n++;
-		}
-		if (ret & LPI_CTRL_STATUS_TLPIEX) {
-			CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
+		if (ret & LPI_CTRL_STATUS_TLPIEX)
 			x->irq_tx_path_exit_lpi_mode_n++;
-		}
-		if (ret & LPI_CTRL_STATUS_RLPIEN) {
-			CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
+		if (ret & LPI_CTRL_STATUS_RLPIEN)
 			x->irq_rx_path_in_lpi_mode_n++;
-		}
-		if (ret & LPI_CTRL_STATUS_RLPIEX) {
-			CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
+		if (ret & LPI_CTRL_STATUS_RLPIEX)
 			x->irq_rx_path_exit_lpi_mode_n++;
-		}
 	}
 
 	if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
-		CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n");
 		readl(ioaddr + GMAC_AN_STATUS);
 		x->irq_pcs_ane_n++;
 	}
 	if (intr_status & rgmii_irq) {
 		u32 status = readl(ioaddr + GMAC_S_R_GMII);
-		CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n");
 		x->irq_rgmii_n++;
 
 		/* Save and dump the link status. */
@@ -271,11 +251,12 @@ static int dwmac1000_irq_status(void __iomem *ioaddr,
 				x->pcs_speed = SPEED_10;
 
 			x->pcs_link = 1;
-			pr_debug("Link is Up - %d/%s\n", (int)x->pcs_speed,
+			pr_debug("%s: Link is Up - %d/%s\n", __func__,
+				 (int)x->pcs_speed,
 				 x->pcs_duplex ? "Full" : "Half");
 		} else {
 			x->pcs_link = 0;
-			pr_debug("Link is Down\n");
+			pr_debug("%s: Link is Down\n", __func__);
 		}
 	}
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 2c431b616058..0c2058a69fd2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -116,7 +116,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
 	u32 csr6 = readl(ioaddr + DMA_CONTROL);
 
 	if (txmode == SF_DMA_MODE) {
-		CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
+		pr_debug("GMAC: enable TX store and forward mode\n");
 		/* Transmit COE type 2 cannot be done in cut-through mode. */
 		csr6 |= DMA_CONTROL_TSF;
 		/* Operating on second frame increase the performance
@@ -124,8 +124,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
 		 */
 		csr6 |= DMA_CONTROL_OSF;
 	} else {
-		CHIP_DBG(KERN_DEBUG "GMAC: disabling TX SF (threshold %d)\n",
-			 txmode);
+		pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
 		csr6 &= ~DMA_CONTROL_TSF;
 		csr6 &= DMA_CONTROL_TC_TX_MASK;
 		/* Set the transmit threshold */
@@ -142,11 +141,10 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
 	}
 
 	if (rxmode == SF_DMA_MODE) {
-		CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
+		pr_debug("GMAC: enable RX store and forward mode\n");
 		csr6 |= DMA_CONTROL_RSF;
 	} else {
-		CHIP_DBG(KERN_DEBUG "GMAC: disable RX SF mode (threshold %d)\n",
-			 rxmode);
+		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
 		csr6 &= ~DMA_CONTROL_RSF;
 		csr6 &= DMA_CONTROL_TC_RX_MASK;
 		if (rxmode <= 32)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 007bb2be3f10..5857d677dac1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -135,10 +135,6 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
 	}
 
 	writel(value, ioaddr + MAC_CONTROL);
-
-	CHIP_DBG(KERN_INFO "%s: Filter: 0x%08x Hash: HI 0x%08x, LO 0x%08x\n",
-		 __func__, readl(ioaddr + MAC_CONTROL),
-		 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
 }
 
 static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 67551c154138..7d1dce9e7ffc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -90,14 +90,14 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
 {
 	int i;
 
-	CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+	pr_debug("DWMAC 100 DMA CSR\n");
 	for (i = 0; i < 9; i++)
 		pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
 			 (DMA_BUS_MODE + i * 4),
 			 readl(ioaddr + DMA_BUS_MODE + i * 4));
-	CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
-		 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
-	CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+
+	pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n",
+		 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR),
 		 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 491d7e930603..484e3cf9c414 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -24,13 +24,6 @@
 #include "common.h"
 #include "dwmac_dma.h"
 
-#undef DWMAC_DMA_DEBUG
-#ifdef DWMAC_DMA_DEBUG
-#define DWMAC_LIB_DBG(fmt, args...)	printk(fmt, ## args)
-#else
-#define DWMAC_LIB_DBG(fmt, args...)	do { } while (0)
-#endif
-
 #define GMAC_HI_REG_AE	0x80000000
 
 /* CSR1 enables the transmit DMA to check for new descriptor */
@@ -85,24 +78,24 @@ static void show_tx_process_state(unsigned int status)
 
 	switch (state) {
 	case 0:
-		pr_info("- TX (Stopped): Reset or Stop command\n");
+		pr_debug("- TX (Stopped): Reset or Stop command\n");
 		break;
 	case 1:
-		pr_info("- TX (Running):Fetching the Tx desc\n");
+		pr_debug("- TX (Running):Fetching the Tx desc\n");
 		break;
 	case 2:
-		pr_info("- TX (Running): Waiting for end of tx\n");
+		pr_debug("- TX (Running): Waiting for end of tx\n");
 		break;
 	case 3:
-		pr_info("- TX (Running): Reading the data "
+		pr_debug("- TX (Running): Reading the data "
 			"and queuing the data into the Tx buf\n");
 		break;
 	case 6:
-		pr_info("- TX (Suspended): Tx Buff Underflow "
+		pr_debug("- TX (Suspended): Tx Buff Underflow "
 			"or an unavailable Transmit descriptor\n");
 		break;
 	case 7:
-		pr_info("- TX (Running): Closing Tx descriptor\n");
+		pr_debug("- TX (Running): Closing Tx descriptor\n");
 		break;
 	default:
 		break;
@@ -116,29 +109,29 @@ static void show_rx_process_state(unsigned int status)
 
 	switch (state) {
 	case 0:
-		pr_info("- RX (Stopped): Reset or Stop command\n");
+		pr_debug("- RX (Stopped): Reset or Stop command\n");
 		break;
 	case 1:
-		pr_info("- RX (Running): Fetching the Rx desc\n");
+		pr_debug("- RX (Running): Fetching the Rx desc\n");
 		break;
 	case 2:
-		pr_info("- RX (Running):Checking for end of pkt\n");
+		pr_debug("- RX (Running):Checking for end of pkt\n");
 		break;
 	case 3:
-		pr_info("- RX (Running): Waiting for Rx pkt\n");
+		pr_debug("- RX (Running): Waiting for Rx pkt\n");
 		break;
 	case 4:
-		pr_info("- RX (Suspended): Unavailable Rx buf\n");
+		pr_debug("- RX (Suspended): Unavailable Rx buf\n");
 		break;
 	case 5:
-		pr_info("- RX (Running): Closing Rx descriptor\n");
+		pr_debug("- RX (Running): Closing Rx descriptor\n");
 		break;
 	case 6:
-		pr_info("- RX(Running): Flushing the current frame"
+		pr_debug("- RX(Running): Flushing the current frame"
 			" from the Rx buf\n");
 		break;
 	case 7:
-		pr_info("- RX (Running): Queuing the Rx frame"
+		pr_debug("- RX (Running): Queuing the Rx frame"
 			" from the Rx buf into memory\n");
 		break;
 	default:
@@ -154,51 +147,37 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
 	/* read the status register (CSR5) */
 	u32 intr_status = readl(ioaddr + DMA_STATUS);
 
-	DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
 #ifdef DWMAC_DMA_DEBUG
-	/* It displays the DMA process states (CSR5 register) */
+	/* Enable it to monitor DMA rx/tx status in case of critical problems */
+	pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status);
 	show_tx_process_state(intr_status);
 	show_rx_process_state(intr_status);
 #endif
 	/* ABNORMAL interrupts */
 	if (unlikely(intr_status & DMA_STATUS_AIS)) {
-		DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
 		if (unlikely(intr_status & DMA_STATUS_UNF)) {
-			DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
 			ret = tx_hard_error_bump_tc;
 			x->tx_undeflow_irq++;
 		}
-		if (unlikely(intr_status & DMA_STATUS_TJT)) {
-			DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
+		if (unlikely(intr_status & DMA_STATUS_TJT))
 			x->tx_jabber_irq++;
-		}
-		if (unlikely(intr_status & DMA_STATUS_OVF)) {
-			DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
+
+		if (unlikely(intr_status & DMA_STATUS_OVF))
 			x->rx_overflow_irq++;
-		}
-		if (unlikely(intr_status & DMA_STATUS_RU)) {
-			DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
+
+		if (unlikely(intr_status & DMA_STATUS_RU))
 			x->rx_buf_unav_irq++;
-		}
-		if (unlikely(intr_status & DMA_STATUS_RPS)) {
-			DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
+		if (unlikely(intr_status & DMA_STATUS_RPS))
 			x->rx_process_stopped_irq++;
-		}
-		if (unlikely(intr_status & DMA_STATUS_RWT)) {
-			DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
+		if (unlikely(intr_status & DMA_STATUS_RWT))
 			x->rx_watchdog_irq++;
-		}
-		if (unlikely(intr_status & DMA_STATUS_ETI)) {
-			DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
+		if (unlikely(intr_status & DMA_STATUS_ETI))
 			x->tx_early_irq++;
-		}
 		if (unlikely(intr_status & DMA_STATUS_TPS)) {
-			DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
 			x->tx_process_stopped_irq++;
 			ret = tx_hard_error;
 		}
 		if (unlikely(intr_status & DMA_STATUS_FBI)) {
-			DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
 			x->fatal_bus_error_irq++;
 			ret = tx_hard_error;
 		}
@@ -224,12 +203,11 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
 	/* Optional hardware blocks, interrupts should be disabled */
 	if (unlikely(intr_status &
 		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
-		pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+		pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
 
 	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
 	writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
 
-	DWMAC_LIB_DBG(KERN_INFO "\n\n");
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 0fbc8fafa706..7e6628a91514 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -33,54 +33,40 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
 	struct net_device_stats *stats = (struct net_device_stats *)data;
 
 	if (unlikely(p->des01.etx.error_summary)) {
-		CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
-		if (unlikely(p->des01.etx.jabber_timeout)) {
-			CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+		if (unlikely(p->des01.etx.jabber_timeout))
 			x->tx_jabber++;
-		}
 
 		if (unlikely(p->des01.etx.frame_flushed)) {
-			CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
 			x->tx_frame_flushed++;
 			dwmac_dma_flush_tx_fifo(ioaddr);
 		}
 
 		if (unlikely(p->des01.etx.loss_carrier)) {
-			CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
 			x->tx_losscarrier++;
 			stats->tx_carrier_errors++;
 		}
 		if (unlikely(p->des01.etx.no_carrier)) {
-			CHIP_DBG(KERN_ERR "\tno_carrier error\n");
 			x->tx_carrier++;
 			stats->tx_carrier_errors++;
 		}
-		if (unlikely(p->des01.etx.late_collision)) {
-			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+		if (unlikely(p->des01.etx.late_collision))
 			stats->collisions += p->des01.etx.collision_count;
-		}
-		if (unlikely(p->des01.etx.excessive_collisions)) {
-			CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+
+		if (unlikely(p->des01.etx.excessive_collisions))
 			stats->collisions += p->des01.etx.collision_count;
-		}
-		if (unlikely(p->des01.etx.excessive_deferral)) {
-			CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+
+		if (unlikely(p->des01.etx.excessive_deferral))
 			x->tx_deferred++;
-		}
 
 		if (unlikely(p->des01.etx.underflow_error)) {
-			CHIP_DBG(KERN_ERR "\tunderflow error\n");
 			dwmac_dma_flush_tx_fifo(ioaddr);
 			x->tx_underflow++;
 		}
 
-		if (unlikely(p->des01.etx.ip_header_error)) {
-			CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+		if (unlikely(p->des01.etx.ip_header_error))
 			x->tx_ip_header_error++;
-		}
 
 		if (unlikely(p->des01.etx.payload_error)) {
-			CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
 			x->tx_payload_error++;
 			dwmac_dma_flush_tx_fifo(ioaddr);
 		}
@@ -88,15 +74,12 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
 		ret = -1;
 	}
 
-	if (unlikely(p->des01.etx.deferred)) {
-		CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+	if (unlikely(p->des01.etx.deferred))
 		x->tx_deferred++;
-	}
+
 #ifdef STMMAC_VLAN_TAG_USED
-	if (p->des01.etx.vlan_frame) {
-		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+	if (p->des01.etx.vlan_frame)
 		x->tx_vlan++;
-	}
 #endif
 
 	return ret;
@@ -123,30 +106,20 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
 	 * 0 1 1 | COE bypassed.. no IPv4/6 frame
 	 * 0 1 0 | Reserved.
 	 */
-	if (status == 0x0) {
-		CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+	if (status == 0x0)
 		ret = llc_snap;
-	} else if (status == 0x4) {
-		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+	else if (status == 0x4)
 		ret = good_frame;
-	} else if (status == 0x5) {
-		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+	else if (status == 0x5)
 		ret = csum_none;
-	} else if (status == 0x6) {
-		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+	else if (status == 0x6)
 		ret = csum_none;
-	} else if (status == 0x7) {
-		CHIP_DBG(KERN_ERR
-			 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+	else if (status == 0x7)
 		ret = csum_none;
-	} else if (status == 0x1) {
-		CHIP_DBG(KERN_ERR
-			 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+	else if (status == 0x1)
 		ret = discard_frame;
-	} else if (status == 0x3) {
-		CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+	else if (status == 0x3)
 		ret = discard_frame;
-	}
 	return ret;
 }
 
@@ -208,36 +181,26 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	struct net_device_stats *stats = (struct net_device_stats *)data;
 
 	if (unlikely(p->des01.erx.error_summary)) {
-		CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
-			 p->des01.erx);
 		if (unlikely(p->des01.erx.descriptor_error)) {
-			CHIP_DBG(KERN_ERR "\tdescriptor error\n");
 			x->rx_desc++;
 			stats->rx_length_errors++;
 		}
-		if (unlikely(p->des01.erx.overflow_error)) {
-			CHIP_DBG(KERN_ERR "\toverflow error\n");
+		if (unlikely(p->des01.erx.overflow_error))
 			x->rx_gmac_overflow++;
-		}
 
 		if (unlikely(p->des01.erx.ipc_csum_error))
-			CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+			pr_err("\tIPC Csum Error/Giant frame\n");
 
 		if (unlikely(p->des01.erx.late_collision)) {
-			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
-			stats->collisions++;
 			stats->collisions++;
 		}
-		if (unlikely(p->des01.erx.receive_watchdog)) {
-			CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+		if (unlikely(p->des01.erx.receive_watchdog))
 			x->rx_watchdog++;
-		}
-		if (unlikely(p->des01.erx.error_gmii)) {
-			CHIP_DBG(KERN_ERR "\tReceive Error\n");
+
+		if (unlikely(p->des01.erx.error_gmii))
 			x->rx_mii++;
-		}
+
 		if (unlikely(p->des01.erx.crc_error)) {
-			CHIP_DBG(KERN_ERR "\tCRC error\n");
 			x->rx_crc++;
 			stats->rx_crc_errors++;
 		}
@@ -251,30 +214,24 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
 		p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
 
-	if (unlikely(p->des01.erx.dribbling)) {
-		CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+	if (unlikely(p->des01.erx.dribbling))
 		x->dribbling_bit++;
-	}
+
 	if (unlikely(p->des01.erx.sa_filter_fail)) {
-		CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
 		x->sa_rx_filter_fail++;
 		ret = discard_frame;
 	}
 	if (unlikely(p->des01.erx.da_filter_fail)) {
-		CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
 		x->da_rx_filter_fail++;
 		ret = discard_frame;
 	}
 	if (unlikely(p->des01.erx.length_error)) {
-		CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
 		x->rx_length++;
 		ret = discard_frame;
 	}
 #ifdef STMMAC_VLAN_TAG_USED
-	if (p->des01.erx.vlan_tag) {
-		CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+	if (p->des01.erx.vlan_tag)
 		x->rx_vlan++;
-	}
 #endif
 
 	return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 11775b99afc5..35ad4f427ae2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -52,10 +52,8 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
 		ret = -1;
 	}
 
-	if (p->des01.etx.vlan_frame) {
-		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+	if (p->des01.etx.vlan_frame)
 		x->tx_vlan++;
-	}
 
 	if (unlikely(p->des01.tx.deferred))
 		x->tx_deferred++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 520693385d8d..62e31054bd24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -51,32 +51,6 @@
 #include "stmmac_ptp.h"
 #include "stmmac.h"
 
-#undef STMMAC_DEBUG
-/*#define STMMAC_DEBUG*/
-#ifdef STMMAC_DEBUG
-#define DBG(nlevel, klevel, fmt, args...) \
-		((void)(netif_msg_##nlevel(priv) && \
-		printk(KERN_##klevel fmt, ## args)))
-#else
-#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
-#endif
-
-#undef STMMAC_RX_DEBUG
-/*#define STMMAC_RX_DEBUG*/
-#ifdef STMMAC_RX_DEBUG
-#define RX_DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define RX_DBG(fmt, args...) do { } while (0)
-#endif
-
-#undef STMMAC_XMIT_DEBUG
-/*#define STMMAC_XMIT_DEBUG*/
-#ifdef STMMAC_XMIT_DEBUG
-#define TX_DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define TX_DBG(fmt, args...) do { } while (0)
-#endif
-
 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
 #define JUMBO_LEN	9000
 
@@ -214,19 +188,17 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 	}
 }
 
-#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
 static void print_pkt(unsigned char *buf, int len)
 {
 	int j;
-	pr_info("len = %d byte, buf addr: 0x%p", len, buf);
+	pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
 	for (j = 0; j < len; j++) {
 		if ((j % 16) == 0)
-			pr_info("\n %03x:", j);
-		pr_info(" %02x", buf[j]);
+			pr_debug("\n %03x:", j);
+		pr_debug(" %02x", buf[j]);
 	}
-	pr_info("\n");
+	pr_debug("\n");
 }
-#endif
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define STMMAC_TX_THRESH(x)	(x->dma_tx_size/4)
@@ -698,9 +670,6 @@ static void stmmac_adjust_link(struct net_device *dev)
 	if (phydev == NULL)
 		return;
 
-	DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
-	    phydev->addr, phydev->link);
-
 	spin_lock_irqsave(&priv->lock, flags);
 
 	if (phydev->link) {
@@ -772,8 +741,6 @@ static void stmmac_adjust_link(struct net_device *dev)
 	stmmac_eee_adjust(priv);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
 }
 
 /**
@@ -1014,8 +981,9 @@ static void init_dma_desc_rings(struct net_device *dev)
 	if (bfsize < BUF_SIZE_16KiB)
 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
 
-	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
-	    txsize, rxsize, bfsize);
+	if (netif_msg_probe(priv))
+		pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
+			 txsize, rxsize, bfsize);
 
 	if (priv->extend_desc) {
 		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
@@ -1051,12 +1019,13 @@ static void init_dma_desc_rings(struct net_device *dev)
 					   GFP_KERNEL);
 	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
-	if (netif_msg_drv(priv))
+	if (netif_msg_probe(priv)) {
 		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
 			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
 
 	/* RX INITIALIZATION */
-	DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
+		pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
+	}
 	for (i = 0; i < rxsize; i++) {
 		struct dma_desc *p;
 		if (priv->extend_desc)
@@ -1067,8 +1036,10 @@ static void init_dma_desc_rings(struct net_device *dev)
 		if (stmmac_init_rx_buffers(priv, p, i))
 			break;
 
-		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
-		    priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+		if (netif_msg_probe(priv))
+			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+				 priv->rx_skbuff[i]->data,
+				 (unsigned int)priv->rx_skbuff_dma[i]);
 	}
 	priv->cur_rx = 0;
 	priv->dirty_rx = (unsigned int)(i - rxsize);
@@ -1243,8 +1214,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 
 			stmmac_get_tx_hwtstamp(priv, entry, skb);
 		}
-		TX_DBG("%s: curr %d, dirty %d\n", __func__,
-		       priv->cur_tx, priv->dirty_tx);
+		if (netif_msg_tx_done(priv))
+			pr_debug("%s: curr %d, dirty %d\n", __func__,
+				 priv->cur_tx, priv->dirty_tx);
 
 		if (likely(priv->tx_skbuff_dma[entry])) {
 			dma_unmap_single(priv->device,
@@ -1269,7 +1241,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 		netif_tx_lock(priv->dev);
 		if (netif_queue_stopped(priv->dev) &&
 		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
-			TX_DBG("%s: restart transmit\n", __func__);
+			if (netif_msg_tx_done(priv))
+				pr_debug("%s: restart transmit\n", __func__);
 			netif_wake_queue(priv->dev);
 		}
 		netif_tx_unlock(priv->dev);
@@ -1658,7 +1631,7 @@ static int stmmac_open(struct net_device *dev)
 		pr_warn("%s: failed debugFS registration\n", __func__);
 #endif
 	/* Start the ball rolling... */
-	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
+	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
@@ -1800,16 +1773,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	entry = priv->cur_tx % txsize;
 
-#ifdef STMMAC_XMIT_DEBUG
-	if ((skb->len > ETH_FRAME_LEN) || nfrags)
-		pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n"
-			 "\tn_frags: %d - ip_summed: %d - %s gso\n"
-			 "\ttx_count_frames %d\n", __func__, entry,
-			 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
-			 !skb_is_gso(skb) ? "isn't" : "is",
-			 priv->tx_count_frames);
-#endif
-
 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
 	if (priv->extend_desc)
@@ -1819,12 +1782,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	first = desc;
 
-#ifdef STMMAC_XMIT_DEBUG
-	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
-		pr_debug("\tskb len: %d, nopaged_len: %d,\n"
-			 "\t\tn_frags: %d, ip_summed: %d\n",
-			 skb->len, nopaged_len, nfrags, skb->ip_summed);
-#endif
 	priv->tx_skbuff[entry] = skb;
 
 	/* To program the descriptors according to the size of the frame */
@@ -1860,7 +1817,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		else
 			desc = priv->dma_tx + entry;
 
-		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
 					      DMA_TO_DEVICE);
 		priv->tx_skbuff_dma[entry] = desc->des2;
@@ -1884,8 +1840,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (priv->tx_coal_frames > priv->tx_count_frames) {
 		priv->hw->desc->clear_tx_ic(desc);
 		priv->xstats.tx_reset_ic_bit++;
-		TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
-		       priv->tx_count_frames);
 		mod_timer(&priv->txtimer,
 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
 	} else
@@ -1897,22 +1851,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	priv->cur_tx++;
 
-#ifdef STMMAC_XMIT_DEBUG
 	if (netif_msg_pktdata(priv)) {
-		pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
+		pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
 			__func__, (priv->cur_tx % txsize),
 			(priv->dirty_tx % txsize), entry, first, nfrags);
+
 		if (priv->extend_desc)
 			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
 		else
 			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
 
-		pr_info(">>> frame to be transmitted: ");
+		pr_debug(">>> frame to be transmitted: ");
 		print_pkt(skb->data, skb->len);
 	}
-#endif
 	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
-		TX_DBG("%s: stop transmitted packets\n", __func__);
+		if (netif_msg_hw(priv))
+			pr_debug("%s: stop transmitted packets\n", __func__);
 		netif_stop_queue(dev);
 	}
 
@@ -1972,7 +1926,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 
 		priv->hw->ring->refill_desc3(priv, p);
 
-		RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+		if (netif_msg_rx_status(priv))
+			pr_debug("\trefill entry #%d\n", entry);
 	}
 	wmb();
 	priv->hw->desc->set_rx_owner(p);
@@ -1995,15 +1950,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	unsigned int count = 0;
 	int coe = priv->plat->rx_coe;
 
-#ifdef STMMAC_RX_DEBUG
-	if (netif_msg_hw(priv)) {
-		pr_debug(">>> stmmac_rx: descriptor ring:\n");
+	if (netif_msg_rx_status(priv)) {
+		pr_debug("%s: descriptor ring:\n", __func__);
 		if (priv->extend_desc)
 			stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
 		else
 			stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
 	}
-#endif
 	while (count < limit) {
 		int status;
 		struct dma_desc *p;
@@ -2057,15 +2010,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 			 */
 			if (unlikely(status != llc_snap))
 				frame_len -= ETH_FCS_LEN;
-#ifdef STMMAC_RX_DEBUG
-			if (frame_len > ETH_FRAME_LEN)
-				pr_debug("\tRX frame size %d, COE status: %d\n",
-					 frame_len, status);
 
-			if (netif_msg_hw(priv))
+			if (netif_msg_rx_status(priv)) {
 				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
 					 p, entry, p->des2);
-#endif
+				if (frame_len > ETH_FRAME_LEN)
+					pr_debug("\tframe size %d, COE: %d\n",
+						 frame_len, status);
+			}
 			skb = priv->rx_skbuff[entry];
 			if (unlikely(!skb)) {
 				pr_err("%s: Inconsistent Rx descriptor chain\n",
@@ -2082,12 +2034,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 			dma_unmap_single(priv->device,
 					 priv->rx_skbuff_dma[entry],
 					 priv->dma_buf_sz, DMA_FROM_DEVICE);
-#ifdef STMMAC_RX_DEBUG
+
 			if (netif_msg_pktdata(priv)) {
-				pr_info(" frame received (%dbytes)", frame_len);
+				pr_debug("frame received (%dbytes)", frame_len);
 				print_pkt(skb->data, frame_len);
 			}
-#endif
+
 			skb->protocol = eth_type_trans(skb, priv->dev);
 
 			if (unlikely(!coe))
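
With this patch applied, two knobs control the messages above at run time. The per-device NETIF_MSG_* bitmap can be adjusted with ethtool, e.g. "ethtool -s eth0 msglvl <bitmask>" (the interface name here is just an example). And since most call sites are now pr_debug(), they only produce output when the file is built with -DDEBUG or when switched on through dynamic debug, e.g. "echo 'file stmmac_main.c +p' > /sys/kernel/debug/dynamic_debug/control", assuming a kernel with CONFIG_DYNAMIC_DEBUG and debugfs mounted at the usual path.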