Diffstat (limited to 'drivers/net/pasemi_mac.c'):
 drivers/net/pasemi_mac.c | 294 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 261 insertions(+), 33 deletions(-)
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 6ea822addde5..54904ad29ea7 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -59,11 +59,12 @@
 /* Must be a power of two */
 #define RX_RING_SIZE 2048
 #define TX_RING_SIZE 4096
+#define CS_RING_SIZE (TX_RING_SIZE*2)
 
 #define LRO_MAX_AGGR 64
 
 #define PE_MIN_MTU	64
-#define PE_MAX_MTU	1500
+#define PE_MAX_MTU	9000
 #define PE_DEF_MTU	ETH_DATA_LEN
 
 #define DEFAULT_MSG_ENABLE	  \
@@ -81,6 +82,7 @@
 #define RX_DESC(rx, num)	((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
 #define RX_DESC_INFO(rx, num)	((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
 #define RX_BUFF(rx, num)	((rx)->buffers[(num) & (RX_RING_SIZE-1)])
+#define CS_DESC(cs, num)	((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)])
 
 #define RING_USED(ring)		(((ring)->next_to_fill - (ring)->next_to_clean) \
 				 & ((ring)->size - 1))
@@ -322,6 +324,103 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
         return (nfrags + 3) & ~1;
 }
 
+static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
+{
+        struct pasemi_mac_csring *ring;
+        u32 val;
+        unsigned int cfg;
+        int chno;
+
+        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
+                                     offsetof(struct pasemi_mac_csring, chan));
+
+        if (!ring) {
+                dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");
+                goto out_chan;
+        }
+
+        chno = ring->chan.chno;
+
+        ring->size = CS_RING_SIZE;
+        ring->next_to_fill = 0;
+
+        /* Allocate descriptors */
+        if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
+                goto out_ring_desc;
+
+        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
+                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
+        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
+        val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);
+
+        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);
+
+        ring->events[0] = pasemi_dma_alloc_flag();
+        ring->events[1] = pasemi_dma_alloc_flag();
+        if (ring->events[0] < 0 || ring->events[1] < 0)
+                goto out_flags;
+
+        pasemi_dma_clear_flag(ring->events[0]);
+        pasemi_dma_clear_flag(ring->events[1]);
+
+        ring->fun = pasemi_dma_alloc_fun();
+        if (ring->fun < 0)
+                goto out_fun;
+
+        cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
+              PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
+              PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;
+
+        if (translation_enabled())
+                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
+
+        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);
+
+        /* enable channel */
+        pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
+                                           PAS_DMA_TXCHAN_TCMDSTA_DB |
+                                           PAS_DMA_TXCHAN_TCMDSTA_DE |
+                                           PAS_DMA_TXCHAN_TCMDSTA_DA);
+
+        return ring;
+
+out_fun:
+out_flags:
+        if (ring->events[0] >= 0)
+                pasemi_dma_free_flag(ring->events[0]);
+        if (ring->events[1] >= 0)
+                pasemi_dma_free_flag(ring->events[1]);
+        pasemi_dma_free_ring(&ring->chan);
+out_ring_desc:
+        pasemi_dma_free_chan(&ring->chan);
+out_chan:
+
+        return NULL;
+}
+
+static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
+{
+        int i;
+        mac->cs[0] = pasemi_mac_setup_csring(mac);
+        if (mac->type == MAC_TYPE_XAUI)
+                mac->cs[1] = pasemi_mac_setup_csring(mac);
+        else
+                mac->cs[1] = 0;
+
+        for (i = 0; i < MAX_CS; i++)
+                if (mac->cs[i])
+                        mac->num_cs++;
+}
+
+static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
+{
+        pasemi_dma_stop_chan(&csring->chan);
+        pasemi_dma_free_flag(csring->events[0]);
+        pasemi_dma_free_flag(csring->events[1]);
+        pasemi_dma_free_ring(&csring->chan);
+        pasemi_dma_free_chan(&csring->chan);
+}
+
 static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
 {
         struct pasemi_mac_rxring *ring;
@@ -445,7 +544,7 @@ pasemi_mac_setup_tx_resources(const struct net_device *dev)
         cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
               PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
               PAS_DMA_TXCHAN_CFG_UP |
-              PAS_DMA_TXCHAN_CFG_WT(2);
+              PAS_DMA_TXCHAN_CFG_WT(4);
 
         if (translation_enabled())
                 cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
@@ -810,13 +909,21 @@ restart:
                 u64 mactx = TX_DESC(txring, i);
                 struct sk_buff *skb;
 
-                skb = TX_DESC_INFO(txring, i+1).skb;
-                nr_frags = TX_DESC_INFO(txring, i).dma;
-
                 if ((mactx & XCT_MACTX_E) ||
                     (*chan->status & PAS_STATUS_ERROR))
                         pasemi_mac_tx_error(mac, mactx);
 
+                /* Skip over control descriptors */
+                if (!(mactx & XCT_MACTX_LLEN_M)) {
+                        TX_DESC(txring, i) = 0;
+                        TX_DESC(txring, i+1) = 0;
+                        buf_count = 2;
+                        continue;
+                }
+
+                skb = TX_DESC_INFO(txring, i+1).skb;
+                nr_frags = TX_DESC_INFO(txring, i).dma;
+
                 if (unlikely(mactx & XCT_MACTX_O))
                         /* Not yet transmitted */
                         break;
@@ -1058,6 +1165,12 @@ static int pasemi_mac_open(struct net_device *dev)
         if (!mac->tx)
                 goto out_tx_ring;
 
+        if (dev->mtu > 1500) {
+                pasemi_mac_setup_csrings(mac);
+                if (!mac->num_cs)
+                        goto out_tx_ring;
+        }
+
         /* 0x3ff with 33MHz clock is about 31us */
         write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
                       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));
@@ -1241,7 +1354,7 @@ static int pasemi_mac_close(struct net_device *dev)
 {
         struct pasemi_mac *mac = netdev_priv(dev);
         unsigned int sta;
-        int rxch, txch;
+        int rxch, txch, i;
 
         rxch = rx_ring(mac)->chan.chno;
         txch = tx_ring(mac)->chan.chno;
@@ -1286,6 +1399,9 @@ static int pasemi_mac_close(struct net_device *dev)
         free_irq(mac->tx->chan.irq, mac->tx);
         free_irq(mac->rx->chan.irq, mac->rx);
 
+        for (i = 0; i < mac->num_cs; i++)
+                pasemi_mac_free_csring(mac->cs[i]);
+
         /* Free resources */
         pasemi_mac_free_rx_resources(mac);
         pasemi_mac_free_tx_resources(mac);
@@ -1293,35 +1409,113 @@ static int pasemi_mac_close(struct net_device *dev)
         return 0;
 }
 
+static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
+                                    const dma_addr_t *map,
+                                    const unsigned int *map_size,
+                                    struct pasemi_mac_txring *txring,
+                                    struct pasemi_mac_csring *csring)
+{
+        u64 fund;
+        dma_addr_t cs_dest;
+        const int nh_off = skb_network_offset(skb);
+        const int nh_len = skb_network_header_len(skb);
+        const int nfrags = skb_shinfo(skb)->nr_frags;
+        int cs_size, i, fill, hdr, cpyhdr, evt;
+        dma_addr_t csdma;
+
+        fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
+               XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+               XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
+               XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;
+
+        switch (ip_hdr(skb)->protocol) {
+        case IPPROTO_TCP:
+                fund |= XCT_FUN_SIG_TCP4;
+                /* TCP checksum is 16 bytes into the header */
+                cs_dest = map[0] + skb_transport_offset(skb) + 16;
+                break;
+        case IPPROTO_UDP:
+                fund |= XCT_FUN_SIG_UDP4;
+                /* UDP checksum is 6 bytes into the header */
+                cs_dest = map[0] + skb_transport_offset(skb) + 6;
+                break;
+        default:
+                BUG();
+        }
+
+        /* Do the checksum offloaded */
+        fill = csring->next_to_fill;
+        hdr = fill;
+
+        CS_DESC(csring, fill++) = fund;
+        /* Room for 8BRES. Checksum result is really 2 bytes into it */
+        csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
+        CS_DESC(csring, fill++) = 0;
+
+        CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
+        for (i = 1; i <= nfrags; i++)
+                CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
+
+        fill += i;
+        if (fill & 1)
+                fill++;
+
+        /* Copy the result into the TCP packet */
+        cpyhdr = fill;
+        CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+                                  XCT_FUN_LLEN(2) | XCT_FUN_SE;
+        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
+        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);
+        fill++;
+
+        evt = !csring->last_event;
+        csring->last_event = evt;
+
+        /* Event handshaking with MAC TX */
+        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
+        CS_DESC(csring, fill++) = 0;
+        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
+        CS_DESC(csring, fill++) = 0;
+        csring->next_to_fill = fill & (CS_RING_SIZE-1);
+
+        cs_size = fill - hdr;
+        write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);
+
+        /* TX-side event handshaking */
+        fill = txring->next_to_fill;
+        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
+        TX_DESC(txring, fill++) = 0;
+        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+                                  CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
+        TX_DESC(txring, fill++) = 0;
+        txring->next_to_fill = fill;
+
+        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
+
+        return;
+}
+
 static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
-        struct pasemi_mac *mac = netdev_priv(dev);
-        struct pasemi_mac_txring *txring;
-        u64 dflags, mactx;
+        struct pasemi_mac * const mac = netdev_priv(dev);
+        struct pasemi_mac_txring * const txring = tx_ring(mac);
+        struct pasemi_mac_csring *csring;
+        u64 dflags = 0;
+        u64 mactx;
         dma_addr_t map[MAX_SKB_FRAGS+1];
         unsigned int map_size[MAX_SKB_FRAGS+1];
         unsigned long flags;
         int i, nfrags;
         int fill;
+        const int nh_off = skb_network_offset(skb);
+        const int nh_len = skb_network_header_len(skb);
 
-        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;
-
-        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                const unsigned char *nh = skb_network_header(skb);
+        prefetch(&txring->ring_info);
 
-                switch (ip_hdr(skb)->protocol) {
-                case IPPROTO_TCP:
-                        dflags |= XCT_MACTX_CSUM_TCP;
-                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
-                        dflags |= XCT_MACTX_IPO(nh - skb->data);
-                        break;
-                case IPPROTO_UDP:
-                        dflags |= XCT_MACTX_CSUM_UDP;
-                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
-                        dflags |= XCT_MACTX_IPO(nh - skb->data);
-                        break;
-                }
-        }
+        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;
 
         nfrags = skb_shinfo(skb)->nr_frags;
 
@@ -1344,24 +1538,46 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
                 }
         }
 
-        mactx = dflags | XCT_MACTX_LLEN(skb->len);
+        if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
+                switch (ip_hdr(skb)->protocol) {
+                case IPPROTO_TCP:
+                        dflags |= XCT_MACTX_CSUM_TCP;
+                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
+                        dflags |= XCT_MACTX_IPO(nh_off);
+                        break;
+                case IPPROTO_UDP:
+                        dflags |= XCT_MACTX_CSUM_UDP;
+                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
+                        dflags |= XCT_MACTX_IPO(nh_off);
+                        break;
+                default:
+                        WARN_ON(1);
+                }
+        }
 
-        txring = tx_ring(mac);
+        mactx = dflags | XCT_MACTX_LLEN(skb->len);
 
         spin_lock_irqsave(&txring->lock, flags);
 
-        fill = txring->next_to_fill;
-
         /* Avoid stepping on the same cache line that the DMA controller
          * is currently about to send, so leave at least 8 words available.
          * Total free space needed is mactx + fragments + 8
          */
-        if (RING_AVAIL(txring) < nfrags + 10) {
+        if (RING_AVAIL(txring) < nfrags + 14) {
                 /* no room -- stop the queue and wait for tx intr */
                 netif_stop_queue(dev);
                 goto out_err;
         }
 
+        /* Queue up checksum + event descriptors, if needed */
+        if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
+                csring = mac->cs[mac->last_cs];
+                mac->last_cs = (mac->last_cs + 1) % mac->num_cs;
+
+                pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
+        }
+
+        fill = txring->next_to_fill;
         TX_DESC(txring, fill) = mactx;
         TX_DESC_INFO(txring, fill).dma = nfrags;
         fill++;
@@ -1439,8 +1655,9 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
 {
         struct pasemi_mac *mac = netdev_priv(dev);
         unsigned int reg;
-        unsigned int rcmdsta;
+        unsigned int rcmdsta = 0;
         int running;
+        int ret = 0;
 
         if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
                 return -EINVAL;
@@ -1462,6 +1679,16 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
                 pasemi_mac_pause_rxint(mac);
                 pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
                 pasemi_mac_free_rx_buffers(mac);
+
+        }
+
+        /* Setup checksum channels if large MTU and none already allocated */
+        if (new_mtu > 1500 && !mac->num_cs) {
+                pasemi_mac_setup_csrings(mac);
+                if (!mac->num_cs) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
         }
 
         /* Change maxf, i.e. what size frames are accepted.
@@ -1476,6 +1703,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
         /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
         mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
 
+out:
         if (running) {
                 write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                               rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
@@ -1488,7 +1716,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
                 pasemi_mac_intf_enable(mac);
         }
 
-        return 0;
+        return ret;
 }
 
 static int __devinit