Diffstat (limited to 'drivers/net/sb1250-mac.c')
-rw-r--r--	drivers/net/sb1250-mac.c	294
1 file changed, 202 insertions(+), 92 deletions(-)
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 0a3a379b634c..132e2148b21c 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -95,19 +95,28 @@ MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
 #endif
 
 #ifdef CONFIG_SBMAC_COALESCE
-static int int_pktcnt = 0;
-module_param(int_pktcnt, int, S_IRUGO);
-MODULE_PARM_DESC(int_pktcnt, "Packet count");
+static int int_pktcnt_tx = 255;
+module_param(int_pktcnt_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
 
-static int int_timeout = 0;
-module_param(int_timeout, int, S_IRUGO);
-MODULE_PARM_DESC(int_timeout, "Timeout value");
+static int int_timeout_tx = 255;
+module_param(int_timeout_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
+
+static int int_pktcnt_rx = 64;
+module_param(int_pktcnt_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
+
+static int int_timeout_rx = 64;
+module_param(int_timeout_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
 #endif
 
 #include <asm/sibyte/sb1250.h>
 #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
 #include <asm/sibyte/bcm1480_regs.h>
 #include <asm/sibyte/bcm1480_int.h>
+#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 #include <asm/sibyte/sb1250_regs.h>
 #include <asm/sibyte/sb1250_int.h>
@@ -155,8 +164,8 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
 
 #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
 
-#define SBMAC_MAX_TXDESCR	32
-#define SBMAC_MAX_RXDESCR	32
+#define SBMAC_MAX_TXDESCR	256
+#define SBMAC_MAX_RXDESCR	256
 
 #define ETHER_ALIGN	2
 #define ETHER_ADDR_LEN	6
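Note on the ring sizing: with NAPI the rings are drained in batches at softirq time rather than once per interrupt, so the patch grows both descriptor rings eightfold. The memory cost stays small; a quick standalone check (assuming the two-word, 16-byte descriptor layout this hardware uses for sbdmadscr_t):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the driver's sbdmadscr_t; on this part a DMA
	 * descriptor is assumed to be two 64-bit words (dscr_a/dscr_b). */
	typedef struct { uint64_t dscr_a, dscr_b; } sbdmadscr_t;

	int main(void)
	{
		int sizes[] = { 32, 256 };	/* old and new ring sizes */

		for (int i = 0; i < 2; i++)
			/* the driver allocates maxdescr+1 entries (see the
			 * kmalloc hunk below) */
			printf("%3d descriptors -> %4zu bytes per ring\n",
			       sizes[i],
			       (sizes[i] + 1) * sizeof(sbdmadscr_t));
		return 0;	/* prints 528 and 4112 bytes */
	}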
@@ -185,10 +194,10 @@ typedef struct sbmacdma_s {
 	 * associated with it.
 	 */
 
-	struct sbmac_softc *sbdma_eth;	    /* back pointer to associated MAC */
-	int              sbdma_channel;	    /* channel number */
-	int              sbdma_txdir;	    /* direction (1=transmit) */
-	int              sbdma_maxdescr;    /* total # of descriptors in ring */
+	struct sbmac_softc *sbdma_eth;	    /* back pointer to associated MAC */
+	int		 sbdma_channel;	    /* channel number */
+	int		 sbdma_txdir;	    /* direction (1=transmit) */
+	int		 sbdma_maxdescr;    /* total # of descriptors in ring */
 #ifdef CONFIG_SBMAC_COALESCE
 	int		 sbdma_int_pktcnt;  /* # descriptors rx/tx before interrupt*/
 	int		 sbdma_int_timeout; /* # usec rx/tx interrupt */
@@ -197,13 +206,16 @@ typedef struct sbmacdma_s {
 	volatile void __iomem *sbdma_config0;	/* DMA config register 0 */
 	volatile void __iomem *sbdma_config1;	/* DMA config register 1 */
 	volatile void __iomem *sbdma_dscrbase;	/* Descriptor base address */
 	volatile void __iomem *sbdma_dscrcnt;	/* Descriptor count register */
 	volatile void __iomem *sbdma_curdscr;	/* current descriptor address */
+	volatile void __iomem *sbdma_oodpktlost;/* pkt drop (rx only) */
+
 
 	/*
 	 * This stuff is for maintenance of the ring
 	 */
 
+	sbdmadscr_t     *sbdma_dscrtable_unaligned;
 	sbdmadscr_t     *sbdma_dscrtable;	/* base of descriptor table */
 	sbdmadscr_t     *sbdma_dscrtable_end;	/* end of descriptor table */
 
@@ -286,8 +298,8 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m);
 static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m);
 static void sbdma_emptyring(sbmacdma_t *d);
 static void sbdma_fillring(sbmacdma_t *d);
-static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d);
-static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d);
+static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, int work_to_do, int poll);
+static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll);
 static int sbmac_initctx(struct sbmac_softc *s);
 static void sbmac_channel_start(struct sbmac_softc *s);
 static void sbmac_channel_stop(struct sbmac_softc *s);
@@ -308,6 +320,8 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev);
 static void sbmac_set_rx_mode(struct net_device *dev);
 static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int sbmac_close(struct net_device *dev);
+static int sbmac_poll(struct net_device *poll_dev, int *budget);
+
 static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
 static int sbmac_mii_probe(struct net_device *dev);
 
@@ -679,6 +693,10 @@ static void sbdma_initctx(sbmacdma_t *d,
 			  int txrx,
 			  int maxdescr)
 {
+#ifdef CONFIG_SBMAC_COALESCE
+	int int_pktcnt, int_timeout;
+#endif
+
 	/*
 	 * Save away interesting stuff in the structure
 	 */
@@ -728,6 +746,11 @@ static void sbdma_initctx(sbmacdma_t *d,
 		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
 	d->sbdma_curdscr =
 		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
+	if (d->sbdma_txdir)
+		d->sbdma_oodpktlost = NULL;
+	else
+		d->sbdma_oodpktlost =
+			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);
 
 	/*
 	 * Allocate memory for the ring
@@ -735,6 +758,7 @@ static void sbdma_initctx(sbmacdma_t *d,
 
 	d->sbdma_maxdescr = maxdescr;
 
+	d->sbdma_dscrtable_unaligned =
 	d->sbdma_dscrtable = (sbdmadscr_t *)
 		kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
 
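Keeping the raw kmalloc() return value in the new sbdma_dscrtable_unaligned lets sbdma_uninitctx() (changed later in this patch) free the original pointer even when sbdma_dscrtable is rounded up to a descriptor boundary; the alignment step itself is outside the lines shown here. The pattern in miniature, as plain C with hypothetical names:

	#include <stdint.h>
	#include <stdlib.h>

	typedef struct { uint64_t dscr_a, dscr_b; } sbdmadscr_t;

	struct ring {
		sbdmadscr_t *dscrtable_unaligned;	/* what malloc returned */
		sbdmadscr_t *dscrtable;			/* aligned working pointer */
	};

	static int ring_alloc(struct ring *r, int maxdescr)
	{
		/* one spare entry leaves room to round up to a 16-byte boundary */
		void *raw = malloc((maxdescr + 1) * sizeof(sbdmadscr_t));

		if (!raw)
			return -1;
		r->dscrtable_unaligned = raw;
		r->dscrtable = (sbdmadscr_t *)
			(((uintptr_t)raw + sizeof(sbdmadscr_t) - 1) &
			 ~(uintptr_t)(sizeof(sbdmadscr_t) - 1));
		return 0;
	}

	static void ring_free(struct ring *r)
	{
		free(r->dscrtable_unaligned);	/* never free the aligned copy */
		r->dscrtable_unaligned = r->dscrtable = NULL;
	}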
@@ -765,12 +789,14 @@ static void sbdma_initctx(sbmacdma_t *d,
 	 * Setup Rx/Tx DMA coalescing defaults
 	 */
 
+	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
 	if ( int_pktcnt ) {
 		d->sbdma_int_pktcnt = int_pktcnt;
 	} else {
 		d->sbdma_int_pktcnt = 1;
 	}
 
+	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
 	if ( int_timeout ) {
 		d->sbdma_int_timeout = int_timeout;
 	} else {
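Each channel now picks the pkt-count/timeout pair for its own direction, still falling back to 1 when a parameter is zero. These values are consumed when the channel is brought up; the DMA config registers are presumably programmed along these lines (a paraphrase of this driver's channel-start code, with field macros from the SiByte DMA headers; treat the exact names as illustrative):

	#ifdef CONFIG_SBMAC_COALESCE
		/* interrupt after sbdma_int_pktcnt packets, or after
		 * sbdma_int_timeout ticks with packets still outstanding */
		__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | 0,
			     d->sbdma_config1);
		__raw_writeq(M_DMA_EOP_INT_EN |
			     V_DMA_RINGSZ(d->sbdma_maxdescr) |
			     V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) | 0,
			     d->sbdma_config0);
	#else
		__raw_writeq(0, d->sbdma_config1);
		__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) | 0,
			     d->sbdma_config0);
	#endif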
@@ -1125,32 +1151,63 @@ static void sbdma_fillring(sbmacdma_t *d)
 	}
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sbmac_netpoll(struct net_device *netdev)
+{
+	struct sbmac_softc *sc = netdev_priv(netdev);
+	int irq = sc->sbm_dev->irq;
+
+	__raw_writeq(0, sc->sbm_imr);
+
+	sbmac_intr(irq, netdev, NULL);
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+		     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+		     sc->sbm_imr);
+#else
+	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+		     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+}
+#endif
 
 /**********************************************************************
- *  SBDMA_RX_PROCESS(sc,d)
+ *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
  *
  *  Process "completed" receive buffers on the specified DMA channel.
- *  Note that this isn't really ideal for priority channels, since
- *  it processes all of the packets on a given channel before
- *  returning.
 *
 *  Input parameters:
 *  	   sc - softc structure
 *  	   d - DMA channel context
+ *         work_to_do - no. of packets to process before enabling interrupt
+ *                      again (for NAPI)
+ *         poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */
 
-static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
+static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d,
+			    int work_to_do, int poll)
 {
 	int curidx;
 	int hwidx;
 	sbdmadscr_t *dsc;
 	struct sk_buff *sb;
 	int len;
+	int work_done = 0;
+	int dropped = 0;
 
-	for (;;) {
+	prefetch(d);
+
+again:
+	/* Check if the HW dropped any frames */
+	sc->sbm_stats.rx_fifo_errors
+	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
+	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
+
+	while (work_to_do-- > 0) {
 		/*
 		 * figure out where we are (as an index) and where
 		 * the hardware is (also as an index)
@@ -1162,7 +1219,12 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
 		 */
 
-		curidx = d->sbdma_remptr - d->sbdma_dscrtable;
+		dsc = d->sbdma_remptr;
+		curidx = dsc - d->sbdma_dscrtable;
+
+		prefetch(dsc);
+		prefetch(&d->sbdma_ctxtable[curidx]);
+
 		hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
 		                d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
 
@@ -1173,13 +1235,12 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 */
 
 		if (curidx == hwidx)
-			break;
+			goto done;
 
 		/*
 		 * Otherwise, get the packet's sk_buff ptr back
 		 */
 
-		dsc = &(d->sbdma_dscrtable[curidx]);
 		sb = d->sbdma_ctxtable[curidx];
 		d->sbdma_ctxtable[curidx] = NULL;
 
@@ -1191,7 +1252,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 * receive ring.
 		 */
 
-		if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
+		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
 
 			/*
 			 * Add a new buffer to replace the old one. If we fail
@@ -1199,9 +1260,14 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 			 * packet and put it right back on the receive ring.
 			 */
 
-			if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
-				sc->sbm_stats.rx_dropped++;
+			if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
+				      -ENOBUFS)) {
+				sc->sbm_stats.rx_dropped++;
 				sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
+				/* No point in continuing at the moment */
+				printk(KERN_ERR "dropped packet (1)\n");
+				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+				goto done;
 			} else {
 				/*
 				 * Set length into the packet
@@ -1213,8 +1279,6 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 				 * receive ring.  Pass the buffer to
 				 * the kernel
 				 */
-				sc->sbm_stats.rx_bytes += len;
-				sc->sbm_stats.rx_packets++;
 				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
 				/* Check hw IPv4/TCP checksum if supported */
 				if (sc->rx_hw_checksum == ENABLE) {
@@ -1226,8 +1290,22 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 					sb->ip_summed = CHECKSUM_NONE;
 				}
 			}
-
-			netif_rx(sb);
+			prefetch(sb->data);
+			prefetch((const void *)(((char *)sb->data)+32));
+			if (poll)
+				dropped = netif_receive_skb(sb);
+			else
+				dropped = netif_rx(sb);
+
+			if (dropped == NET_RX_DROP) {
+				sc->sbm_stats.rx_dropped++;
+				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+				goto done;
+			}
+			else {
+				sc->sbm_stats.rx_bytes += len;
+				sc->sbm_stats.rx_packets++;
+			}
 		}
 	} else {
 		/*
@@ -1244,12 +1322,16 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 */
 
 		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
-
+		work_done++;
 	}
+	if (!poll) {
+		work_to_do = 32;
+		goto again; /* collect fifo drop statistics again */
+	}
+done:
+	return work_done;
 }
 
-
-
 /**********************************************************************
 *  SBDMA_TX_PROCESS(sc,d)
 *
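sbdma_rx_process() now returns the number of frames handled so the poll callback can account against its quota; note also that rx_bytes/rx_packets are bumped only for frames the stack accepted, and that in interrupt mode (poll == 0) the function jumps back to "again" with a fresh budget of 32 so the out-of-descriptor drop counter is harvested once more. The overall shape reduces to a budgeted ring drain, sketched here with hypothetical helpers (not driver code):

	struct ring;					/* hypothetical ring type */
	struct sk_buff;					/* kernel socket buffer */

	struct sk_buff *ring_next_completed(struct ring *r);	/* hypothetical */
	int  ring_refill(struct ring *r);			/* hypothetical */
	void ring_requeue(struct ring *r, struct sk_buff *sb);	/* hypothetical */
	void deliver_to_stack(struct sk_buff *sb);	/* netif_rx()/netif_receive_skb() */

	/* Drain up to 'budget' completed entries; the return value is what
	 * the poll callback subtracts from its quota. */
	static int ring_drain(struct ring *r, int budget)
	{
		int done = 0;

		while (budget-- > 0) {
			struct sk_buff *sb = ring_next_completed(r);

			if (!sb)			/* caught up with the hardware */
				break;
			if (ring_refill(r) < 0) {	/* no replacement buffer */
				ring_requeue(r, sb);	/* keep old buffer, stop early */
				break;
			}
			deliver_to_stack(sb);
			done++;
		}
		return done;
	}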
@@ -1261,22 +1343,30 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 *
 *  Input parameters:
 *  	   sc - softc structure
 *  	   d - DMA channel context
+ *         poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */
 
-static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
+static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll)
 {
 	int curidx;
 	int hwidx;
 	sbdmadscr_t *dsc;
 	struct sk_buff *sb;
 	unsigned long flags;
+	int packets_handled = 0;
 
 	spin_lock_irqsave(&(sc->sbm_lock), flags);
 
+	if (d->sbdma_remptr == d->sbdma_addptr)
+		goto end_unlock;
+
+	hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+			d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
+
 	for (;;) {
 		/*
 		 * figure out where we are (as an index) and where
@@ -1290,8 +1380,6 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 */
 
 		curidx = d->sbdma_remptr - d->sbdma_dscrtable;
-		hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
-				d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
 
 		/*
 		 * If they're the same, that means we've processed all
@@ -1329,6 +1417,8 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 
 		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
 
+		packets_handled++;
+
 	}
 
 	/*
@@ -1337,8 +1427,10 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 	 * watermark on the transmit queue.
 	 */
 
-	netif_wake_queue(d->sbdma_eth->sbm_dev);
+	if (packets_handled)
+		netif_wake_queue(d->sbdma_eth->sbm_dev);
 
+end_unlock:
 	spin_unlock_irqrestore(&(sc->sbm_lock), flags);
 
 }
@@ -1412,9 +1504,9 @@ static int sbmac_initctx(struct sbmac_softc *s)
 
 static void sbdma_uninitctx(struct sbmacdma_s *d)
 {
-	if (d->sbdma_dscrtable) {
-		kfree(d->sbdma_dscrtable);
-		d->sbdma_dscrtable = NULL;
+	if (d->sbdma_dscrtable_unaligned) {
+		kfree(d->sbdma_dscrtable_unaligned);
+		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
 	}
 
 	if (d->sbdma_ctxtable) {
@@ -1612,15 +1704,9 @@ static void sbmac_channel_start(struct sbmac_softc *s)
 #endif
 
 #ifdef CONFIG_SBMAC_COALESCE
-	/*
-	 * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
-	 */
 	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
 		   ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
 #else
-	/*
-	 * Accept any kind of interrupt on TX and RX DMA channel 0
-	 */
 	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
 		   (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
 #endif
@@ -2053,57 +2139,46 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
 	uint64_t isr;
 	int handled = 0;
 
-	for (;;) {
-
-		/*
-		 * Read the ISR (this clears the bits in the real
-		 * register, except for counter addr)
-		 */
-
-		isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+	/*
+	 * Read the ISR (this clears the bits in the real
+	 * register, except for counter addr)
+	 */
 
-		if (isr == 0)
-			break;
+	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
 
-		handled = 1;
-
-		/*
-		 * Transmits on channel 0
-		 */
+	if (isr == 0)
+		return IRQ_RETVAL(0);
+	handled = 1;
 
-		if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
-			sbdma_tx_process(sc,&(sc->sbm_txdma));
+	/*
+	 * Transmits on channel 0
+	 */
+
+	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
+		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
+#ifdef CONFIG_NETPOLL_TRAP
+		if (netpoll_trap()) {
+			if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
+				__netif_schedule(dev);
 		}
+#endif
+	}
 
-		/*
-		 * Receives on channel 0
-		 */
-
-		/*
-		 * It's important to test all the bits (or at least the
-		 * EOP_SEEN bit) when deciding to do the RX process
-		 * particularly when coalescing, to make sure we
-		 * take care of the following:
-		 *
-		 * If you have some packets waiting (have been received
-		 * but no interrupt) and get a TX interrupt before
-		 * the RX timer or counter expires, reading the ISR
-		 * above will clear the timer and counter, and you
-		 * won't get another interrupt until a packet shows
-		 * up to start the timer again.  Testing
-		 * EOP_SEEN here takes care of this case.
-		 * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
-		 */
-
-
-		if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
-			sbdma_rx_process(sc,&(sc->sbm_rxdma));
+	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
+		if (netif_rx_schedule_prep(dev)) {
+			__raw_writeq(0, sc->sbm_imr);
+			__netif_rx_schedule(dev);
+			/* Depend on the exit from poll to reenable intr */
+		}
+		else {
+			/* may leave some packets behind */
+			sbdma_rx_process(sc,&(sc->sbm_rxdma),
+					 SBMAC_MAX_RXDESCR * 2, 0);
 		}
 	}
 	return IRQ_RETVAL(handled);
 }
 
-
 /**********************************************************************
 *  SBMAC_START_TX(skb,dev)
 *
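On the receive side the handler now only schedules the device: netif_rx_schedule_prep() returns false if the device is already on the poll list, so the mask-then-schedule sequence runs at most once until the poll pass re-enables interrupts; the else branch (taken, e.g., when netpoll holds the device) falls back to direct processing with a generous twice-the-ring budget. The ISR half of that handoff in miniature (mask_rx_irq() is a hypothetical stand-in; this driver writes 0 to sc->sbm_imr):

	/* ISR-side half of the 2.6.2x-era NAPI handoff (background sketch,
	 * not driver code). */
	static void rx_irq(struct net_device *dev)
	{
		if (netif_rx_schedule_prep(dev)) {	/* false if already queued */
			mask_rx_irq(dev);		/* no further RX interrupts ... */
			__netif_rx_schedule(dev);	/* ... until the poll completes */
		}
		/* else: a poll is already pending and will pick these frames up */
	}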
@@ -2233,8 +2308,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
 	}
 }
 
-
-
 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
 /**********************************************************************
 *  SBMAC_PARSE_XDIGIT(str)
@@ -2397,8 +2470,13 @@ static int sbmac_init(struct net_device *dev, int idx)
 	dev->do_ioctl           = sbmac_mii_ioctl;
 	dev->tx_timeout         = sbmac_tx_timeout;
 	dev->watchdog_timeo     = TX_TIMEOUT;
+	dev->poll               = sbmac_poll;
+	dev->weight             = 16;
 
 	dev->change_mtu         = sb1250_change_mtu;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = sbmac_netpoll;
+#endif
 
 	/* This is needed for PASS2 for Rx H/W checksum feature */
 	sbmac_set_iphdr_offset(sc);
@@ -2796,7 +2874,39 @@ static int sbmac_close(struct net_device *dev)
 	return 0;
 }
 
+static int sbmac_poll(struct net_device *dev, int *budget)
+{
+	int work_to_do;
+	int work_done;
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	work_to_do = min(*budget, dev->quota);
+	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), work_to_do, 1);
 
+	if (work_done > work_to_do)
+		printk(KERN_ERR "%s exceeded work_to_do budget=%d quota=%d work-done=%d\n",
+		       sc->sbm_dev->name, *budget, dev->quota, work_done);
+
+	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
+
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done < work_to_do) {
+		netif_rx_complete(dev);
+
+#ifdef CONFIG_SBMAC_COALESCE
+		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+			     sc->sbm_imr);
+#else
+		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+	}
+
+	return (work_done >= work_to_do);
+}
 
 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
 static void
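sbmac_poll() implements the pre-napi_struct polling contract: process at most min(*budget, dev->quota) packets, decrement both counters, and return 0 only after netif_rx_complete() has removed the device from the poll list and the interrupt mask has been restored. The contract reduced to a skeleton (rx_work() and enable_irqs() are hypothetical stand-ins):

	/* Background sketch of the 2.6.2x dev->poll contract, not driver code. */
	static int example_poll(struct net_device *dev, int *budget)
	{
		int limit = min(*budget, dev->quota);	/* min() per linux/kernel.h */
		int done  = rx_work(dev, limit);	/* hypothetical RX drain */

		*budget    -= done;
		dev->quota -= done;

		if (done < limit) {		/* ring fully drained */
			netif_rx_complete(dev);	/* take dev off the poll list */
			enable_irqs(dev);	/* hypothetical: unmask, IRQs resume */
			return 0;		/* done; do not repoll */
		}
		return 1;			/* more work; net_rx_action() repolls */
	}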
@@ -2883,7 +2993,7 @@ sbmac_init_module(void)
 
 	/*
 	 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
-	 * value for us by the firmware if we're going to use this MAC.
+	 * value for us by the firmware if we are going to use this MAC.
 	 * If we find a zero, skip this MAC.
 	 */
 