Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c515.c        | 10
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c      |  8
-rw-r--r--  drivers/net/ethernet/apple/bmac.c        | 11
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c    | 20
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 21
-rw-r--r--  drivers/net/ethernet/s6gmac.c            |  9
6 files changed, 42 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index f67a5d3a200c..59e1e001bc3f 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -826,11 +826,10 @@ static int corkscrew_open(struct net_device *dev)
 		vp->rx_ring[i].next = 0;
 		vp->rx_ring[i].status = 0;	/* Clear complete bit. */
 		vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
-		skb = dev_alloc_skb(PKT_BUF_SZ);
+		skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 		vp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;	/* Bad news!  */
-		skb->dev = dev;	/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 		vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
 	}
@@ -1295,7 +1294,7 @@ static int corkscrew_rx(struct net_device *dev)
 			short pkt_len = rx_status & 0x1fff;
 			struct sk_buff *skb;
 
-			skb = dev_alloc_skb(pkt_len + 5 + 2);
+			skb = netdev_alloc_skb(dev, pkt_len + 5 + 2);
 			if (corkscrew_debug > 4)
 				pr_debug("Receiving packet size %d status %4.4x.\n",
 					 pkt_len, rx_status);
@@ -1368,7 +1367,7 @@ static int boomerang_rx(struct net_device *dev)
 			/* Check if the packet is long enough to just accept without
 			   copying to a properly sized skbuff. */
 			if (pkt_len < rx_copybreak &&
-			    (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
+			    (skb = netdev_alloc_skb(dev, pkt_len + 4)) != NULL) {
 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 				/* 'skb_put()' points to the start of sk_buff data area. */
 				memcpy(skb_put(skb, pkt_len),
@@ -1403,10 +1402,9 @@ static int boomerang_rx(struct net_device *dev)
 		struct sk_buff *skb;
 		entry = vp->dirty_rx % RX_RING_SIZE;
 		if (vp->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(PKT_BUF_SZ);
+			skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 			if (skb == NULL)
 				break;	/* Bad news!  */
-			skb->dev = dev;	/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
 			vp->rx_skbuff[entry] = skb;
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 525a9768bb54..49733696703a 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -113,7 +113,7 @@ static void desc_list_free(void)
 	}
 }
 
-static int desc_list_init(void)
+static int desc_list_init(struct net_device *dev)
 {
 	int i;
 	struct sk_buff *new_skb;
@@ -187,7 +187,7 @@ static int desc_list_init(void)
 		struct dma_descriptor *b = &(r->desc_b);
 
 		/* allocate a new skb for next time receive */
-		new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+		new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
 		if (!new_skb) {
 			pr_notice("init: low on mem - packet dropped\n");
 			goto init_error;
@@ -1090,7 +1090,7 @@ static void bfin_mac_rx(struct net_device *dev)
 	/* allocate a new skb for next time receive */
 	skb = current_rx_ptr->skb;
 
-	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+	new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
 	if (!new_skb) {
 		netdev_notice(dev, "rx: low on mem - packet dropped\n");
 		dev->stats.rx_dropped++;
@@ -1397,7 +1397,7 @@ static int bfin_mac_open(struct net_device *dev)
 	}
 
 	/* initial rx and tx list */
-	ret = desc_list_init();
+	ret = desc_list_init(dev);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index ebc0dba5ba33..855bdafb1a87 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -607,8 +607,9 @@ bmac_init_tx_ring(struct bmac_data *bp)
 }
 
 static int
-bmac_init_rx_ring(struct bmac_data *bp)
+bmac_init_rx_ring(struct net_device *dev)
 {
+	struct bmac_data *bp = netdev_priv(dev);
 	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
 	int i;
 	struct sk_buff *skb;
@@ -618,7 +619,7 @@ bmac_init_rx_ring(struct bmac_data *bp)
 	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
 	for (i = 0; i < N_RX_RING; i++) {
 		if ((skb = bp->rx_bufs[i]) == NULL) {
-			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
 			if (skb != NULL)
 				skb_reserve(skb, 2);
 		}
@@ -722,7 +723,7 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
 			++dev->stats.rx_dropped;
 		}
 		if ((skb = bp->rx_bufs[i]) == NULL) {
-			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
 			if (skb != NULL)
 				skb_reserve(bp->rx_bufs[i], 2);
 		}
@@ -1208,7 +1209,7 @@ static void bmac_reset_and_enable(struct net_device *dev)
 	spin_lock_irqsave(&bp->lock, flags);
 	bmac_enable_and_reset_chip(dev);
 	bmac_init_tx_ring(bp);
-	bmac_init_rx_ring(bp);
+	bmac_init_rx_ring(dev);
 	bmac_init_chip(dev);
 	bmac_start_chip(dev);
 	bmwrite(dev, INTDISABLE, EnableNormal);
@@ -1218,7 +1219,7 @@ static void bmac_reset_and_enable(struct net_device *dev)
 	 * It seems that the bmac can't receive until it's transmitted
 	 * a packet.  So we give it a dummy packet to transmit.
 	 */
-	skb = dev_alloc_skb(ETHERMINPACKET);
+	skb = netdev_alloc_skb(dev, ETHERMINPACKET);
 	if (skb != NULL) {
 		data = skb_put(skb, ETHERMINPACKET);
 		memset(data, 0, ETHERMINPACKET);
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 51f7542eb451..1eccf4945485 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -325,8 +325,8 @@ static irqreturn_t dmfe_interrupt(int , void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void poll_dmfe (struct net_device *dev);
 #endif
-static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
-static void allocate_rx_buffer(struct dmfe_board_info *);
+static void dmfe_descriptor_init(struct net_device *, unsigned long);
+static void allocate_rx_buffer(struct net_device *);
 static void update_cr6(u32, unsigned long);
 static void send_filter_frame(struct DEVICE *);
 static void dm9132_id_table(struct DEVICE *);
@@ -649,7 +649,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
 	db->op_mode = db->media_mode;	/* Force Mode */
 
 	/* Initialize Transmit/Receive decriptor and CR3/4 */
-	dmfe_descriptor_init(db, ioaddr);
+	dmfe_descriptor_init(dev, ioaddr);
 
 	/* Init CR6 to program DM910x operation */
 	update_cr6(db->cr6_data, ioaddr);
@@ -828,7 +828,7 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 
 	/* reallocate rx descriptor buffer */
 	if (db->rx_avail_cnt<RX_DESC_CNT)
-		allocate_rx_buffer(db);
+		allocate_rx_buffer(dev);
 
 	/* Free the transmitted descriptor */
 	if ( db->cr5_data & 0x01)
@@ -1008,7 +1008,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 		/* Good packet, send to upper layer */
 		/* Shorst packet used new SKB */
 		if ((rxlen < RX_COPY_SIZE) &&
-		    ((newskb = dev_alloc_skb(rxlen + 2))
+		    ((newskb = netdev_alloc_skb(dev, rxlen + 2))
 		     != NULL)) {
 
 			skb = newskb;
@@ -1364,8 +1364,9 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
  *	Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
+static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
 {
+	struct dmfe_board_info *db = netdev_priv(dev);
 	struct tx_desc *tmp_tx;
 	struct rx_desc *tmp_rx;
 	unsigned char *tmp_buf;
@@ -1421,7 +1422,7 @@ static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioadd
 	tmp_rx->next_rx_desc = db->first_rx_desc;
 
 	/* pre-allocate Rx buffer */
-	allocate_rx_buffer(db);
+	allocate_rx_buffer(dev);
 }
 
 
@@ -1551,15 +1552,16 @@ static void send_filter_frame(struct DEVICE *dev)
  *	As possible as allocate maxiumn Rx buffer
  */
 
-static void allocate_rx_buffer(struct dmfe_board_info *db)
+static void allocate_rx_buffer(struct net_device *dev)
 {
+	struct dmfe_board_info *db = netdev_priv(dev);
 	struct rx_desc *rxptr;
 	struct sk_buff *skb;
 
 	rxptr = db->rx_insert_ptr;
 
 	while(db->rx_avail_cnt < RX_DESC_CNT) {
-		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+		if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
 			break;
 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
 		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 48b0b6566eef..fc4001f6a5e4 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -232,8 +232,8 @@ static irqreturn_t uli526x_interrupt(int, void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void uli526x_poll(struct net_device *dev);
 #endif
-static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
-static void allocate_rx_buffer(struct uli526x_board_info *);
+static void uli526x_descriptor_init(struct net_device *, unsigned long);
+static void allocate_rx_buffer(struct net_device *);
 static void update_cr6(u32, unsigned long);
 static void send_filter_frame(struct net_device *, int);
 static u16 phy_read(unsigned long, u8, u8, u32);
@@ -549,7 +549,7 @@ static void uli526x_init(struct net_device *dev)
 	db->op_mode = db->media_mode;	/* Force Mode */
 
 	/* Initialize Transmit/Receive decriptor and CR3/4 */
-	uli526x_descriptor_init(db, ioaddr);
+	uli526x_descriptor_init(dev, ioaddr);
 
 	/* Init CR6 to program M526X operation */
 	update_cr6(db->cr6_data, ioaddr);
@@ -711,7 +711,7 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
 
 	/* reallocate rx descriptor buffer */
 	if (db->rx_avail_cnt<RX_DESC_CNT)
-		allocate_rx_buffer(db);
+		allocate_rx_buffer(dev);
 
 	/* Free the transmitted descriptor */
 	if ( db->cr5_data & 0x01)
@@ -844,7 +844,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
 		/* Good packet, send to upper layer */
 		/* Shorst packet used new SKB */
 		if ((rxlen < RX_COPY_SIZE) &&
-		    (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+		    (((new_skb = netdev_alloc_skb(dev, rxlen + 2)) != NULL))) {
 			skb = new_skb;
 			/* size less than COPY_SIZE, allocate a rxlen SKB */
 			skb_reserve(skb, 2); /* 16byte align */
@@ -1289,8 +1289,9 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
  *	Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
+static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr)
 {
+	struct uli526x_board_info *db = netdev_priv(dev);
 	struct tx_desc *tmp_tx;
 	struct rx_desc *tmp_rx;
 	unsigned char *tmp_buf;
@@ -1343,7 +1344,7 @@ static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long
 	tmp_rx->next_rx_desc = db->first_rx_desc;
 
 	/* pre-allocate Rx buffer */
-	allocate_rx_buffer(db);
+	allocate_rx_buffer(dev);
 }
 
 
@@ -1433,15 +1434,17 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
  *	As possible as allocate maxiumn Rx buffer
  */
 
-static void allocate_rx_buffer(struct uli526x_board_info *db)
+static void allocate_rx_buffer(struct net_device *dev)
 {
+	struct uli526x_board_info *db = netdev_priv(dev);
 	struct rx_desc *rxptr;
 	struct sk_buff *skb;
 
 	rxptr = db->rx_insert_ptr;
 
 	while(db->rx_avail_cnt < RX_DESC_CNT) {
-		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+		skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
+		if (skb == NULL)
 			break;
 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
 		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index bee97033167d..1895605abb35 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -370,12 +370,13 @@ struct s6gmac {
 	} link;
 };
 
-static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+static void s6gmac_rx_fillfifo(struct net_device *dev)
 {
+	struct s6gmac *pd = netdev_priv(dev);
 	struct sk_buff *skb;
 	while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
 	       (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
-	       (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+	       (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
 		pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
 		s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
 			pd->io, (u32)skb->data, S6_MAX_FRLEN);
@@ -514,7 +515,7 @@ static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
 	spin_lock(&pd->lock);
 	if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
 		s6gmac_rx_interrupt(dev);
-	s6gmac_rx_fillfifo(pd);
+	s6gmac_rx_fillfifo(dev);
 	if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
 		s6gmac_tx_interrupt(dev);
 	s6gmac_stats_interrupt(pd, 0);
@@ -894,7 +895,7 @@ static int s6gmac_open(struct net_device *dev)
 	s6gmac_init_device(dev);
 	s6gmac_init_stats(dev);
 	s6gmac_init_dmac(dev);
-	s6gmac_rx_fillfifo(pd);
+	s6gmac_rx_fillfifo(dev);
 	s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
 			2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
 	s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
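
All six drivers get the same two-part treatment: dev_alloc_skb(len) becomes netdev_alloc_skb(dev, len), which associates the skb with its net_device at allocation time and makes the manual "skb->dev = dev;" assignments redundant, and RX helpers that used to take the driver-private struct now take the struct net_device * and recover their private state with netdev_priv(). A minimal sketch of the resulting shape, assuming a hypothetical driver (foo_priv, foo_refill_rx, and FOO_BUF_SZ are illustrative names, not from this patch):

	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define FOO_BUF_SZ 1536			/* illustrative buffer size */

	struct foo_priv {			/* hypothetical per-device state */
		struct sk_buff *rx_skb;
	};

	/* The helper takes the net_device, recovers its private state with
	 * netdev_priv(), and lets netdev_alloc_skb() set skb->dev itself,
	 * so the old "skb->dev = dev;" line disappears. */
	static int foo_refill_rx(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);
		struct sk_buff *skb;

		skb = netdev_alloc_skb(dev, FOO_BUF_SZ + NET_IP_ALIGN);
		if (skb == NULL)
			return -ENOMEM;		/* caller retries later */
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
		priv->rx_skb = skb;
		return 0;
	}

Passing the net_device instead of the private struct costs one netdev_priv() call (a pointer offset) and in exchange gives every allocation site the device pointer it needs, which is why the dmfe, uli526x, bmac, bfin_mac, and s6gmac helpers all had their signatures changed rather than stashing a back-pointer in the board-info struct.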