author	Mark Mason <mmason@upwardaccess.com>	2007-04-26 03:23:22 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-04-28 11:01:07 -0400
commit	693aa9470d8273a0ded8b211a8f5f7c0835adf30 (patch)
tree	efad4a82527e9285ed6dae51e775d58360cc622f /drivers/net
parent	55e924cf5772cbcf00549e448be35b392ff3084c (diff)
add NAPI support to sb1250-mac.c
Patch to add NAPI support to sb1250-mac.c (rev 2). This patch differs from the last in that the NAPI support isn't marked as experimental, nor is it configurable (i.e. once applied, NAPI is enabled all the time). This was based on feedback from Ralf and others.

Signed-off-by: Mark Mason <mason@broadcom.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/sb1250-mac.c	271
1 file changed, 179 insertions(+), 92 deletions(-)
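For context, the sketch below illustrates the 2.6.21-era NAPI contract that this patch wires sb1250-mac.c into: the interrupt handler masks RX interrupts and schedules the device for polling, and the poll callback drains the RX ring against a budget, re-enabling interrupts only once the ring runs dry. This is a minimal illustration, not code from the patch; the mydrv_* helpers are hypothetical stand-ins for device-specific ring and interrupt-mask handling, while the netif_* calls and the dev->poll/dev->weight registration are the same API the patch itself uses.

/* Hypothetical driver skeleton; only the netif_* calls are real kernel API. */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

static void mydrv_mask_rx_irq(struct net_device *dev);             /* hypothetical */
static void mydrv_unmask_rx_irq(struct net_device *dev);           /* hypothetical */
static int mydrv_drain_rx_ring(struct net_device *dev, int limit); /* hypothetical */

static irqreturn_t mydrv_intr(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;

	if (netif_rx_schedule_prep(dev)) {
		/* Mask RX interrupts; poll() re-enables them when done */
		mydrv_mask_rx_irq(dev);
		__netif_rx_schedule(dev);
	}
	return IRQ_HANDLED;
}

static int mydrv_poll(struct net_device *dev, int *budget)
{
	/* Never process more than the global budget or the per-device quota */
	int work_to_do = min(*budget, dev->quota);
	int work_done = mydrv_drain_rx_ring(dev, work_to_do);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		/* Ring drained: leave polled mode and unmask interrupts */
		netif_rx_complete(dev);
		mydrv_unmask_rx_irq(dev);
		return 0;	/* done; remove device from poll list */
	}
	return 1;		/* more work pending; poll again */
}

Registration matches the hunk in sbmac_init() further down: dev->poll = sbmac_poll; dev->weight = 16;. The weight bounds how many packets one poll pass may consume before the device must yield to other NAPI consumers.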
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index e3f1a0707267..132e2148b21c 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -95,19 +95,28 @@ MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
 #endif
 
 #ifdef CONFIG_SBMAC_COALESCE
-static int int_pktcnt = 0;
-module_param(int_pktcnt, int, S_IRUGO);
-MODULE_PARM_DESC(int_pktcnt, "Packet count");
+static int int_pktcnt_tx = 255;
+module_param(int_pktcnt_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
 
-static int int_timeout = 0;
-module_param(int_timeout, int, S_IRUGO);
-MODULE_PARM_DESC(int_timeout, "Timeout value");
+static int int_timeout_tx = 255;
+module_param(int_timeout_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
+
+static int int_pktcnt_rx = 64;
+module_param(int_pktcnt_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
+
+static int int_timeout_rx = 64;
+module_param(int_timeout_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
 #endif
 
 #include <asm/sibyte/sb1250.h>
 #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
 #include <asm/sibyte/bcm1480_regs.h>
 #include <asm/sibyte/bcm1480_int.h>
+#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 #include <asm/sibyte/sb1250_regs.h>
 #include <asm/sibyte/sb1250_int.h>
@@ -155,8 +164,8 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
 
 #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
 
-#define SBMAC_MAX_TXDESCR	32
-#define SBMAC_MAX_RXDESCR	32
+#define SBMAC_MAX_TXDESCR	256
+#define SBMAC_MAX_RXDESCR	256
 
 #define ETHER_ALIGN	2
 #define ETHER_ADDR_LEN	6
@@ -185,10 +194,10 @@ typedef struct sbmacdma_s {
 	 * associated with it.
 	 */
 
-	struct sbmac_softc *sbdma_eth;	    /* back pointer to associated MAC */
-	int              sbdma_channel;	/* channel number */
-	int		 sbdma_txdir;       /* direction (1=transmit) */
-	int		 sbdma_maxdescr;	/* total # of descriptors in ring */
+	struct sbmac_softc *sbdma_eth;	    /* back pointer to associated MAC */
+	int		sbdma_channel;	    /* channel number */
+	int		sbdma_txdir;	    /* direction (1=transmit) */
+	int		sbdma_maxdescr;	    /* total # of descriptors in ring */
 #ifdef CONFIG_SBMAC_COALESCE
 	int		sbdma_int_pktcnt;  /* # descriptors rx/tx before interrupt*/
 	int		sbdma_int_timeout; /* # usec rx/tx interrupt */
@@ -197,13 +206,16 @@ typedef struct sbmacdma_s {
 	volatile void __iomem *sbdma_config0;	/* DMA config register 0 */
 	volatile void __iomem *sbdma_config1;	/* DMA config register 1 */
 	volatile void __iomem *sbdma_dscrbase;	/* Descriptor base address */
 	volatile void __iomem *sbdma_dscrcnt;	/* Descriptor count register */
 	volatile void __iomem *sbdma_curdscr;	/* current descriptor address */
+	volatile void __iomem *sbdma_oodpktlost;/* pkt drop (rx only) */
+
 
 	/*
 	 * This stuff is for maintenance of the ring
 	 */
 
+	sbdmadscr_t     *sbdma_dscrtable_unaligned;
 	sbdmadscr_t     *sbdma_dscrtable;	/* base of descriptor table */
 	sbdmadscr_t     *sbdma_dscrtable_end;	/* end of descriptor table */
 
@@ -286,8 +298,8 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m);
 static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m);
 static void sbdma_emptyring(sbmacdma_t *d);
 static void sbdma_fillring(sbmacdma_t *d);
-static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d);
-static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d);
+static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, int work_to_do, int poll);
+static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll);
 static int sbmac_initctx(struct sbmac_softc *s);
 static void sbmac_channel_start(struct sbmac_softc *s);
 static void sbmac_channel_stop(struct sbmac_softc *s);
@@ -308,6 +320,8 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev);
 static void sbmac_set_rx_mode(struct net_device *dev);
 static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int sbmac_close(struct net_device *dev);
+static int sbmac_poll(struct net_device *poll_dev, int *budget);
+
 static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
 static int sbmac_mii_probe(struct net_device *dev);
 
@@ -679,6 +693,10 @@ static void sbdma_initctx(sbmacdma_t *d,
 			  int txrx,
 			  int maxdescr)
 {
+#ifdef CONFIG_SBMAC_COALESCE
+	int int_pktcnt, int_timeout;
+#endif
+
 	/*
 	 * Save away interesting stuff in the structure
 	 */
@@ -728,6 +746,11 @@ static void sbdma_initctx(sbmacdma_t *d,
 		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
 	d->sbdma_curdscr =
 		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
+	if (d->sbdma_txdir)
+		d->sbdma_oodpktlost = NULL;
+	else
+		d->sbdma_oodpktlost =
+			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);
 
 	/*
 	 * Allocate memory for the ring
@@ -735,6 +758,7 @@ static void sbdma_initctx(sbmacdma_t *d,
 
 	d->sbdma_maxdescr = maxdescr;
 
+	d->sbdma_dscrtable_unaligned =
 	d->sbdma_dscrtable = (sbdmadscr_t *)
 		kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
 
@@ -765,12 +789,14 @@ static void sbdma_initctx(sbmacdma_t *d,
 	 * Setup Rx/Tx DMA coalescing defaults
 	 */
 
+	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
 	if ( int_pktcnt ) {
 		d->sbdma_int_pktcnt = int_pktcnt;
 	} else {
 		d->sbdma_int_pktcnt = 1;
 	}
 
+	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
 	if ( int_timeout ) {
 		d->sbdma_int_timeout = int_timeout;
 	} else {
@@ -1147,30 +1173,41 @@ static void sbmac_netpoll(struct net_device *netdev)
 #endif
 
 /**********************************************************************
- *  SBDMA_RX_PROCESS(sc,d)
+ *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
  *
 *  Process "completed" receive buffers on the specified DMA channel.
- *  Note that this isn't really ideal for priority channels, since
- *  it processes all of the packets on a given channel before
- *  returning.
 *
 *  Input parameters:
 *	   sc - softc structure
 *	   d - DMA channel context
+ *	   work_to_do - no. of packets to process before enabling interrupt
+ *			again (for NAPI)
+ *	   poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *	   nothing
 ********************************************************************* */
 
-static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
+static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d,
+			    int work_to_do, int poll)
 {
 	int curidx;
 	int hwidx;
 	sbdmadscr_t *dsc;
 	struct sk_buff *sb;
 	int len;
+	int work_done = 0;
+	int dropped = 0;
 
-	for (;;) {
+	prefetch(d);
+
+again:
+	/* Check if the HW dropped any frames */
+	sc->sbm_stats.rx_fifo_errors
+	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
+	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
+
+	while (work_to_do-- > 0) {
 		/*
 		 * figure out where we are (as an index) and where
 		 * the hardware is (also as an index)
@@ -1182,7 +1219,12 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
 		 */
 
-		curidx = d->sbdma_remptr - d->sbdma_dscrtable;
+		dsc = d->sbdma_remptr;
+		curidx = dsc - d->sbdma_dscrtable;
+
+		prefetch(dsc);
+		prefetch(&d->sbdma_ctxtable[curidx]);
+
 		hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
 				d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
 
@@ -1193,13 +1235,12 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 */
 
 		if (curidx == hwidx)
-			break;
+			goto done;
 
 		/*
 		 * Otherwise, get the packet's sk_buff ptr back
 		 */
 
-		dsc = &(d->sbdma_dscrtable[curidx]);
 		sb = d->sbdma_ctxtable[curidx];
 		d->sbdma_ctxtable[curidx] = NULL;
 
@@ -1211,7 +1252,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 * receive ring.
 		 */
 
-		if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
+		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
 
 			/*
 			 * Add a new buffer to replace the old one. If we fail
@@ -1219,9 +1260,14 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 			 * packet and put it right back on the receive ring.
 			 */
 
-			if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
-				sc->sbm_stats.rx_dropped++;
+			if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
+				      -ENOBUFS)) {
+				sc->sbm_stats.rx_dropped++;
 				sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
+				/* No point in continuing at the moment */
+				printk(KERN_ERR "dropped packet (1)\n");
+				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+				goto done;
 			} else {
 				/*
 				 * Set length into the packet
@@ -1233,8 +1279,6 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 				 * receive ring.  Pass the buffer to
 				 * the kernel
 				 */
-				sc->sbm_stats.rx_bytes += len;
-				sc->sbm_stats.rx_packets++;
 				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
 				/* Check hw IPv4/TCP checksum if supported */
 				if (sc->rx_hw_checksum == ENABLE) {
@@ -1246,8 +1290,22 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 						sb->ip_summed = CHECKSUM_NONE;
 					}
 				}
-
-				netif_rx(sb);
+				prefetch(sb->data);
+				prefetch((const void *)(((char *)sb->data)+32));
+				if (poll)
+					dropped = netif_receive_skb(sb);
+				else
+					dropped = netif_rx(sb);
+
+				if (dropped == NET_RX_DROP) {
+					sc->sbm_stats.rx_dropped++;
+					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+					goto done;
+				}
+				else {
+					sc->sbm_stats.rx_bytes += len;
+					sc->sbm_stats.rx_packets++;
+				}
 			}
 		} else {
 			/*
@@ -1264,12 +1322,16 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 			 */
 
 			d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
-
+			work_done++;
+		}
+		if (!poll) {
+			work_to_do = 32;
+			goto again; /* collect fifo drop statistics again */
 		}
+done:
+	return work_done;
 }
 
-
-
 /**********************************************************************
  *  SBDMA_TX_PROCESS(sc,d)
  *
@@ -1281,22 +1343,30 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 *
 *  Input parameters:
 *	   sc - softc structure
 *	   d - DMA channel context
+ *	   poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *	   nothing
 ********************************************************************* */
 
-static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
+static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll)
 {
 	int curidx;
 	int hwidx;
 	sbdmadscr_t *dsc;
 	struct sk_buff *sb;
 	unsigned long flags;
+	int packets_handled = 0;
 
 	spin_lock_irqsave(&(sc->sbm_lock), flags);
 
+	if (d->sbdma_remptr == d->sbdma_addptr)
+		goto end_unlock;
+
+	hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+			d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
+
 	for (;;) {
 		/*
 		 * figure out where we are (as an index) and where
@@ -1310,8 +1380,6 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 		 */
 
 		curidx = d->sbdma_remptr - d->sbdma_dscrtable;
-		hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
-				d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
 
 		/*
 		 * If they're the same, that means we've processed all
@@ -1349,6 +1417,8 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 
 		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
 
+		packets_handled++;
+
 	}
 
 	/*
@@ -1357,8 +1427,10 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 	 * watermark on the transmit queue.
 	 */
 
-	netif_wake_queue(d->sbdma_eth->sbm_dev);
+	if (packets_handled)
+		netif_wake_queue(d->sbdma_eth->sbm_dev);
 
+end_unlock:
 	spin_unlock_irqrestore(&(sc->sbm_lock), flags);
 
 }
@@ -1432,9 +1504,9 @@ static int sbmac_initctx(struct sbmac_softc *s)
 
 static void sbdma_uninitctx(struct sbmacdma_s *d)
 {
-	if (d->sbdma_dscrtable) {
-		kfree(d->sbdma_dscrtable);
-		d->sbdma_dscrtable = NULL;
+	if (d->sbdma_dscrtable_unaligned) {
+		kfree(d->sbdma_dscrtable_unaligned);
+		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
 	}
 
 	if (d->sbdma_ctxtable) {
@@ -1632,15 +1704,9 @@ static void sbmac_channel_start(struct sbmac_softc *s)
 #endif
 
 #ifdef CONFIG_SBMAC_COALESCE
-	/*
-	 * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
-	 */
 	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
 		   ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
 #else
-	/*
-	 * Accept any kind of interrupt on TX and RX DMA channel 0
-	 */
 	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
 		   (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
 #endif
@@ -2073,57 +2139,46 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
 	uint64_t isr;
 	int handled = 0;
 
-	for (;;) {
-
-		/*
-		 * Read the ISR (this clears the bits in the real
-		 * register, except for counter addr)
-		 */
-
-		isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+	/*
+	 * Read the ISR (this clears the bits in the real
+	 * register, except for counter addr)
+	 */
 
-		if (isr == 0)
-			break;
+	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
 
-		handled = 1;
+	if (isr == 0)
+		return IRQ_RETVAL(0);
+	handled = 1;
 
 	/*
 	 * Transmits on channel 0
 	 */
 
 	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
-		sbdma_tx_process(sc,&(sc->sbm_txdma));
+		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
+#ifdef CONFIG_NETPOLL_TRAP
+		if (netpoll_trap()) {
+			if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
+				__netif_schedule(dev);
 		}
+#endif
+	}
 
-		/*
-		 * Receives on channel 0
-		 */
-
-		/*
-		 * It's important to test all the bits (or at least the
-		 * EOP_SEEN bit) when deciding to do the RX process
-		 * particularly when coalescing, to make sure we
-		 * take care of the following:
-		 *
-		 * If you have some packets waiting (have been received
-		 * but no interrupt) and get a TX interrupt before
-		 * the RX timer or counter expires, reading the ISR
-		 * above will clear the timer and counter, and you
-		 * won't get another interrupt until a packet shows
-		 * up to start the timer again.  Testing
-		 * EOP_SEEN here takes care of this case.
-		 * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
-		 */
-
-
-		if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
-			sbdma_rx_process(sc,&(sc->sbm_rxdma));
+	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
+		if (netif_rx_schedule_prep(dev)) {
+			__raw_writeq(0, sc->sbm_imr);
+			__netif_rx_schedule(dev);
+			/* Depend on the exit from poll to reenable intr */
+		}
+		else {
+			/* may leave some packets behind */
+			sbdma_rx_process(sc,&(sc->sbm_rxdma),
+					 SBMAC_MAX_RXDESCR * 2, 0);
 		}
 	}
 	return IRQ_RETVAL(handled);
 }
 
-
 /**********************************************************************
 *  SBMAC_START_TX(skb,dev)
 *
@@ -2253,8 +2308,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
 	}
 }
 
-
-
 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
 /**********************************************************************
 *  SBMAC_PARSE_XDIGIT(str)
@@ -2417,6 +2470,8 @@ static int sbmac_init(struct net_device *dev, int idx)
 	dev->do_ioctl           = sbmac_mii_ioctl;
 	dev->tx_timeout         = sbmac_tx_timeout;
 	dev->watchdog_timeo     = TX_TIMEOUT;
+	dev->poll               = sbmac_poll;
+	dev->weight             = 16;
 
 	dev->change_mtu         = sb1250_change_mtu;
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2819,7 +2874,39 @@ static int sbmac_close(struct net_device *dev)
 	return 0;
 }
 
+static int sbmac_poll(struct net_device *dev, int *budget)
+{
+	int work_to_do;
+	int work_done;
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	work_to_do = min(*budget, dev->quota);
+	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), work_to_do, 1);
+
+	if (work_done > work_to_do)
+		printk(KERN_ERR "%s exceeded work_to_do budget=%d quota=%d work-done=%d\n",
+		       sc->sbm_dev->name, *budget, dev->quota, work_done);
 
+	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
+
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done < work_to_do) {
+		netif_rx_complete(dev);
+
+#ifdef CONFIG_SBMAC_COALESCE
+		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+			     sc->sbm_imr);
+#else
+		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+	}
+
+	return (work_done >= work_to_do);
+}
 
 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
 static void
@@ -2906,7 +2993,7 @@ sbmac_init_module(void)
 
 	/*
 	 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
-	 * value for us by the firmware if we're going to use this MAC.
+	 * value for us by the firmware if we are going to use this MAC.
 	 * If we find a zero, skip this MAC.
 	 */
 