Diffstat (limited to 'drivers/net/pasemi_mac.c')
 -rw-r--r--  drivers/net/pasemi_mac.c | 355
 1 file changed, 299 insertions(+), 56 deletions(-)
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index bcd7f9814ed8..3b2a6c598088 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -55,15 +55,10 @@
  * - Multiqueue RX/TX
  */
 
-
-/* Must be a power of two */
-#define RX_RING_SIZE 2048
-#define TX_RING_SIZE 4096
-
 #define LRO_MAX_AGGR 64
 
 #define PE_MIN_MTU 64
-#define PE_MAX_MTU 1500
+#define PE_MAX_MTU 9000
 #define PE_DEF_MTU ETH_DATA_LEN
 
 #define DEFAULT_MSG_ENABLE \
@@ -76,16 +71,6 @@
                   NETIF_MSG_RX_ERR | \
                   NETIF_MSG_TX_ERR)
 
-#define TX_DESC(tx, num)       ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
-#define TX_DESC_INFO(tx, num)  ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
-#define RX_DESC(rx, num)       ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
-#define RX_DESC_INFO(rx, num)  ((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
-#define RX_BUFF(rx, num)       ((rx)->buffers[(num) & (RX_RING_SIZE-1)])
-
-#define RING_USED(ring)        (((ring)->next_to_fill - (ring)->next_to_clean) \
-                                & ((ring)->size - 1))
-#define RING_AVAIL(ring)       ((ring->size) - RING_USED(ring))
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
 MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
@@ -94,6 +79,8 @@ static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
 
+extern const struct ethtool_ops pasemi_mac_ethtool_ops;
+
 static int translation_enabled(void)
 {
 #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
@@ -322,6 +309,104 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
         return (nfrags + 3) & ~1;
 }
 
+static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
+{
+        struct pasemi_mac_csring *ring;
+        u32 val;
+        unsigned int cfg;
+        int chno;
+
+        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
+                                     offsetof(struct pasemi_mac_csring, chan));
+
+        if (!ring) {
+                dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");
+                goto out_chan;
+        }
+
+        chno = ring->chan.chno;
+
+        ring->size = CS_RING_SIZE;
+        ring->next_to_fill = 0;
+
+        /* Allocate descriptors */
+        if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
+                goto out_ring_desc;
+
+        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
+                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
+        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
+        val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);
+
+        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);
+
+        ring->events[0] = pasemi_dma_alloc_flag();
+        ring->events[1] = pasemi_dma_alloc_flag();
+        if (ring->events[0] < 0 || ring->events[1] < 0)
+                goto out_flags;
+
+        pasemi_dma_clear_flag(ring->events[0]);
+        pasemi_dma_clear_flag(ring->events[1]);
+
+        ring->fun = pasemi_dma_alloc_fun();
+        if (ring->fun < 0)
+                goto out_fun;
+
+        cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
+              PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
+              PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;
+
+        if (translation_enabled())
+                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
+
+        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);
+
+        /* enable channel */
+        pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
+                              PAS_DMA_TXCHAN_TCMDSTA_DB |
+                              PAS_DMA_TXCHAN_TCMDSTA_DE |
+                              PAS_DMA_TXCHAN_TCMDSTA_DA);
+
+        return ring;
+
+out_fun:
+out_flags:
+        if (ring->events[0] >= 0)
+                pasemi_dma_free_flag(ring->events[0]);
+        if (ring->events[1] >= 0)
+                pasemi_dma_free_flag(ring->events[1]);
+        pasemi_dma_free_ring(&ring->chan);
+out_ring_desc:
+        pasemi_dma_free_chan(&ring->chan);
+out_chan:
+
+        return NULL;
+}
+
+static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
+{
+        int i;
+        mac->cs[0] = pasemi_mac_setup_csring(mac);
+        if (mac->type == MAC_TYPE_XAUI)
+                mac->cs[1] = pasemi_mac_setup_csring(mac);
+        else
+                mac->cs[1] = 0;
+
+        for (i = 0; i < MAX_CS; i++)
+                if (mac->cs[i])
+                        mac->num_cs++;
+}
+
+static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
+{
+        pasemi_dma_stop_chan(&csring->chan);
+        pasemi_dma_free_flag(csring->events[0]);
+        pasemi_dma_free_flag(csring->events[1]);
+        pasemi_dma_free_ring(&csring->chan);
+        pasemi_dma_free_chan(&csring->chan);
+        pasemi_dma_free_fun(csring->fun);
+}
+
 static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
 {
         struct pasemi_mac_rxring *ring;
@@ -445,7 +530,7 @@ pasemi_mac_setup_tx_resources(const struct net_device *dev)
         cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
               PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
               PAS_DMA_TXCHAN_CFG_UP |
-              PAS_DMA_TXCHAN_CFG_WT(2);
+              PAS_DMA_TXCHAN_CFG_WT(4);
 
         if (translation_enabled())
                 cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
@@ -810,13 +895,21 @@ restart:
                 u64 mactx = TX_DESC(txring, i);
                 struct sk_buff *skb;
 
-                skb = TX_DESC_INFO(txring, i+1).skb;
-                nr_frags = TX_DESC_INFO(txring, i).dma;
-
                 if ((mactx & XCT_MACTX_E) ||
                     (*chan->status & PAS_STATUS_ERROR))
                         pasemi_mac_tx_error(mac, mactx);
 
+                /* Skip over control descriptors */
+                if (!(mactx & XCT_MACTX_LLEN_M)) {
+                        TX_DESC(txring, i) = 0;
+                        TX_DESC(txring, i+1) = 0;
+                        buf_count = 2;
+                        continue;
+                }
+
+                skb = TX_DESC_INFO(txring, i+1).skb;
+                nr_frags = TX_DESC_INFO(txring, i).dma;
+
                 if (unlikely(mactx & XCT_MACTX_O))
                         /* Not yet transmitted */
                         break;
@@ -1041,13 +1134,7 @@ static int pasemi_mac_open(struct net_device *dev)
 {
         struct pasemi_mac *mac = netdev_priv(dev);
         unsigned int flags;
-        int ret;
-
-        /* enable rx section */
-        write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);
-
-        /* enable tx section */
-        write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
+        int i, ret;
 
         flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
                 PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
@@ -1064,6 +1151,19 @@ static int pasemi_mac_open(struct net_device *dev)
         if (!mac->tx)
                 goto out_tx_ring;
 
+        /* We might already have allocated rings in case mtu was changed
+         * before interface was brought up.
+         */
+        if (dev->mtu > 1500 && !mac->num_cs) {
+                pasemi_mac_setup_csrings(mac);
+                if (!mac->num_cs)
+                        goto out_tx_ring;
+        }
+
+        /* Zero out rmon counters */
+        for (i = 0; i < 32; i++)
+                write_mac_reg(mac, PAS_MAC_RMON(i), 0);
+
         /* 0x3ff with 33MHz clock is about 31us */
         write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
                       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));
@@ -1247,7 +1347,7 @@ static int pasemi_mac_close(struct net_device *dev)
 {
         struct pasemi_mac *mac = netdev_priv(dev);
         unsigned int sta;
-        int rxch, txch;
+        int rxch, txch, i;
 
         rxch = rx_ring(mac)->chan.chno;
         txch = tx_ring(mac)->chan.chno;
@@ -1292,6 +1392,13 @@ static int pasemi_mac_close(struct net_device *dev)
         free_irq(mac->tx->chan.irq, mac->tx);
         free_irq(mac->rx->chan.irq, mac->rx);
 
+        for (i = 0; i < mac->num_cs; i++) {
+                pasemi_mac_free_csring(mac->cs[i]);
+                mac->cs[i] = NULL;
+        }
+
+        mac->num_cs = 0;
+
         /* Free resources */
         pasemi_mac_free_rx_resources(mac);
         pasemi_mac_free_tx_resources(mac);
@@ -1299,35 +1406,113 @@ static int pasemi_mac_close(struct net_device *dev)
         return 0;
 }
 
+static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
+                                    const dma_addr_t *map,
+                                    const unsigned int *map_size,
+                                    struct pasemi_mac_txring *txring,
+                                    struct pasemi_mac_csring *csring)
+{
+        u64 fund;
+        dma_addr_t cs_dest;
+        const int nh_off = skb_network_offset(skb);
+        const int nh_len = skb_network_header_len(skb);
+        const int nfrags = skb_shinfo(skb)->nr_frags;
+        int cs_size, i, fill, hdr, cpyhdr, evt;
+        dma_addr_t csdma;
+
+        fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
+               XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+               XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
+               XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;
+
+        switch (ip_hdr(skb)->protocol) {
+        case IPPROTO_TCP:
+                fund |= XCT_FUN_SIG_TCP4;
+                /* TCP checksum is 16 bytes into the header */
+                cs_dest = map[0] + skb_transport_offset(skb) + 16;
+                break;
+        case IPPROTO_UDP:
+                fund |= XCT_FUN_SIG_UDP4;
+                /* UDP checksum is 6 bytes into the header */
+                cs_dest = map[0] + skb_transport_offset(skb) + 6;
+                break;
+        default:
+                BUG();
+        }
+
+        /* Do the checksum offloaded */
+        fill = csring->next_to_fill;
+        hdr = fill;
+
+        CS_DESC(csring, fill++) = fund;
+        /* Room for 8BRES. Checksum result is really 2 bytes into it */
+        csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
+        CS_DESC(csring, fill++) = 0;
+
+        CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
+        for (i = 1; i <= nfrags; i++)
+                CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
+
+        fill += i;
+        if (fill & 1)
+                fill++;
+
+        /* Copy the result into the TCP packet */
+        cpyhdr = fill;
+        CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+                                  XCT_FUN_LLEN(2) | XCT_FUN_SE;
+        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
+        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);
+        fill++;
+
+        evt = !csring->last_event;
+        csring->last_event = evt;
+
+        /* Event handshaking with MAC TX */
+        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
+        CS_DESC(csring, fill++) = 0;
+        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
+        CS_DESC(csring, fill++) = 0;
+        csring->next_to_fill = fill & (CS_RING_SIZE-1);
+
+        cs_size = fill - hdr;
+        write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);
+
+        /* TX-side event handshaking */
+        fill = txring->next_to_fill;
+        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
+        TX_DESC(txring, fill++) = 0;
+        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
+        TX_DESC(txring, fill++) = 0;
+        txring->next_to_fill = fill;
+
+        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
+
+        return;
+}
+
 static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
-        struct pasemi_mac *mac = netdev_priv(dev);
-        struct pasemi_mac_txring *txring;
-        u64 dflags, mactx;
+        struct pasemi_mac * const mac = netdev_priv(dev);
+        struct pasemi_mac_txring * const txring = tx_ring(mac);
+        struct pasemi_mac_csring *csring;
+        u64 dflags = 0;
+        u64 mactx;
         dma_addr_t map[MAX_SKB_FRAGS+1];
         unsigned int map_size[MAX_SKB_FRAGS+1];
         unsigned long flags;
         int i, nfrags;
         int fill;
+        const int nh_off = skb_network_offset(skb);
+        const int nh_len = skb_network_header_len(skb);
 
-        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;
-
-        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                const unsigned char *nh = skb_network_header(skb);
+        prefetch(&txring->ring_info);
 
-                switch (ip_hdr(skb)->protocol) {
-                case IPPROTO_TCP:
-                        dflags |= XCT_MACTX_CSUM_TCP;
-                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
-                        dflags |= XCT_MACTX_IPO(nh - skb->data);
-                        break;
-                case IPPROTO_UDP:
-                        dflags |= XCT_MACTX_CSUM_UDP;
-                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
-                        dflags |= XCT_MACTX_IPO(nh - skb->data);
-                        break;
-                }
-        }
+        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;
 
         nfrags = skb_shinfo(skb)->nr_frags;
 
@@ -1350,24 +1535,46 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
                 }
         }
 
-        mactx = dflags | XCT_MACTX_LLEN(skb->len);
+        if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
+                switch (ip_hdr(skb)->protocol) {
+                case IPPROTO_TCP:
+                        dflags |= XCT_MACTX_CSUM_TCP;
+                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
+                        dflags |= XCT_MACTX_IPO(nh_off);
+                        break;
+                case IPPROTO_UDP:
+                        dflags |= XCT_MACTX_CSUM_UDP;
+                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
+                        dflags |= XCT_MACTX_IPO(nh_off);
+                        break;
+                default:
+                        WARN_ON(1);
+                }
+        }
 
-        txring = tx_ring(mac);
+        mactx = dflags | XCT_MACTX_LLEN(skb->len);
 
         spin_lock_irqsave(&txring->lock, flags);
 
-        fill = txring->next_to_fill;
-
         /* Avoid stepping on the same cache line that the DMA controller
          * is currently about to send, so leave at least 8 words available.
          * Total free space needed is mactx + fragments + 8
          */
-        if (RING_AVAIL(txring) < nfrags + 10) {
+        if (RING_AVAIL(txring) < nfrags + 14) {
                 /* no room -- stop the queue and wait for tx intr */
                 netif_stop_queue(dev);
                 goto out_err;
         }
 
+        /* Queue up checksum + event descriptors, if needed */
+        if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
+                csring = mac->cs[mac->last_cs];
+                mac->last_cs = (mac->last_cs + 1) % mac->num_cs;
+
+                pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
+        }
+
+        fill = txring->next_to_fill;
         TX_DESC(txring, fill) = mactx;
         TX_DESC_INFO(txring, fill).dma = nfrags;
         fill++;
@@ -1441,12 +1648,33 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
         return pkts;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void pasemi_mac_netpoll(struct net_device *dev)
+{
+        const struct pasemi_mac *mac = netdev_priv(dev);
+
+        disable_irq(mac->tx->chan.irq);
+        pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx);
+        enable_irq(mac->tx->chan.irq);
+
+        disable_irq(mac->rx->chan.irq);
+        pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx);
+        enable_irq(mac->rx->chan.irq);
+}
+#endif
+
 static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
 {
         struct pasemi_mac *mac = netdev_priv(dev);
         unsigned int reg;
-        unsigned int rcmdsta;
+        unsigned int rcmdsta = 0;
         int running;
+        int ret = 0;
 
         if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
                 return -EINVAL;
@@ -1468,6 +1696,16 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
                 pasemi_mac_pause_rxint(mac);
                 pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
                 pasemi_mac_free_rx_buffers(mac);
+
+        }
+
+        /* Setup checksum channels if large MTU and none already allocated */
+        if (new_mtu > 1500 && !mac->num_cs) {
+                pasemi_mac_setup_csrings(mac);
+                if (!mac->num_cs) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
         }
 
         /* Change maxf, i.e. what size frames are accepted.
@@ -1482,6 +1720,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
         /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
         mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
 
+out:
         if (running) {
                 write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                               rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
@@ -1494,7 +1733,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
                 pasemi_mac_intf_enable(mac);
         }
 
-        return 0;
+        return ret;
 }
 
 static int __devinit
@@ -1528,7 +1767,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
 
         dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
-                        NETIF_F_HIGHDMA;
+                        NETIF_F_HIGHDMA | NETIF_F_GSO;
 
         mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
         mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
@@ -1588,8 +1827,12 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         dev->mtu = PE_DEF_MTU;
         /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
         mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+        dev->poll_controller = pasemi_mac_netpoll;
+#endif
 
         dev->change_mtu = pasemi_mac_change_mtu;
+        dev->ethtool_ops = &pasemi_mac_ethtool_ops;
 
         if (err)
                 goto out;