Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/b44.c              |  7
-rw-r--r--  drivers/net/bonding/bond_alb.c |  4
-rw-r--r--  drivers/net/forcedeth.c        | 31
-rw-r--r--  drivers/net/ibmveth.c          | 58
-rw-r--r--  drivers/net/mv643xx_eth.c      |  4
-rw-r--r--  drivers/net/sky2.c             |  4
6 files changed, 77 insertions(+), 31 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index b124eee4eb10..ebb726e655ac 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1706,14 +1706,15 @@ static void __b44_set_rx_mode(struct net_device *dev)
 
 		__b44_set_mac_addr(bp);
 
-		if (dev->flags & IFF_ALLMULTI)
+		if ((dev->flags & IFF_ALLMULTI) ||
+		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
 			val |= RXCONFIG_ALLMULTI;
 		else
 			i = __b44_load_mcast(bp, dev);
 
-		for (; i < 64; i++) {
+		for (; i < 64; i++)
 			__b44_cam_write(bp, zero, i);
-		}
+
 		bw32(bp, B44_RXCONFIG, val);
 		val = br32(bp, B44_CAM_CTRL);
 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
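The b44 change handles the case where the multicast list no longer fits the 64-entry CAM: instead of silently dropping the overflow entries, the driver falls back to RXCONFIG_ALLMULTI whenever dev->mc_count exceeds B44_MCAST_TABLE_SIZE. A minimal userspace sketch of that decision; the constant and helper below are illustrative stand-ins, not driver code:

/* Illustrative stand-ins; the real driver tests IFF_ALLMULTI and
 * B44_MCAST_TABLE_SIZE. */
#include <stdio.h>

#define MCAST_TABLE_SIZE 32

static int use_allmulti(int allmulti_flag, int mc_count)
{
	/* Accept all multicast if asked to, or if the list overflows the CAM. */
	return allmulti_flag || mc_count > MCAST_TABLE_SIZE;
}

int main(void)
{
	printf("%d\n", use_allmulti(0, 10));   /* 0: program the filter normally */
	printf("%d\n", use_allmulti(0, 100));  /* 1: list too long, go allmulti */
	printf("%d\n", use_allmulti(1, 0));    /* 1: ALLMULTI explicitly requested */
	return 0;
}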
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e83bc825f6af..32923162179e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1433,7 +1433,7 @@ void bond_alb_monitor(struct bonding *bond)
 		 * write lock to protect from other code that also
 		 * sets the promiscuity.
 		 */
-		write_lock(&bond->curr_slave_lock);
+		write_lock_bh(&bond->curr_slave_lock);
 
 		if (bond_info->primary_is_promisc &&
 		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
@@ -1448,7 +1448,7 @@ void bond_alb_monitor(struct bonding *bond)
 			bond_info->primary_is_promisc = 0;
 		}
 
-		write_unlock(&bond->curr_slave_lock);
+		write_unlock_bh(&bond->curr_slave_lock);
 
 	if (bond_info->rlb_rebalance) {
 		bond_info->rlb_rebalance = 0;
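The switch to the _bh lock variants matters because bond_alb_monitor() runs in process context while curr_slave_lock is also taken from the transmit path, which can run in softirq context; without disabling bottom halves, a softirq arriving on the same CPU while the lock is held would spin on it forever. A simplified kernel-style sketch of the pattern, assuming a lock shared between the two contexts (not the bonding driver's actual code):

/* Sketch: a rwlock shared between process context and softirq context.
 * The process-context writer uses the _bh variants so a softirq cannot
 * preempt it and deadlock on the lock it already holds. */
static DEFINE_RWLOCK(shared_lock);	/* stand-in for bond->curr_slave_lock */

static void process_context_writer(void)
{
	write_lock_bh(&shared_lock);	/* also disables softirqs locally */
	/* ... update state read and written by the softirq path ... */
	write_unlock_bh(&shared_lock);
}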
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 99b7a411db28..c5ed635bce36 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2497,6 +2497,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
 
@@ -2508,16 +2509,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2527,7 +2528,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 
@@ -2601,6 +2602,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
 
@@ -2614,14 +2616,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 
 		nv_rx_process(dev, dev->weight);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2631,7 +2633,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 	}
@@ -2648,6 +2650,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
 
@@ -2660,14 +2663,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2675,7 +2678,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2685,7 +2688,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 
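All three forcedeth handlers move from spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore pair: the unconditional variants re-enable interrupts on unlock, which is only safe if the handler is guaranteed to have been entered with interrupts enabled. Saving and restoring the flags keeps whatever interrupt state the caller had. A simplified kernel-style sketch of the pattern; the lock name is a stand-in:

/* Sketch: preserve the caller's interrupt state across the critical
 * section instead of unconditionally re-enabling interrupts on unlock. */
static DEFINE_SPINLOCK(dev_lock);	/* stand-in for np->lock */

static void irq_handler_body(void)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_lock, flags);	/* irqs off, prior state saved */
	/* ... touch state shared with other interrupt/process contexts ... */
	spin_unlock_irqrestore(&dev_lock, flags);	/* restored, not forced back on */
}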
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 4bac3cd8f235..bf414a93facb 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -213,6 +213,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		}
 
 		free_index = pool->consumer_index++ % pool->size;
+		pool->consumer_index = free_index;
 		index = pool->free_map[free_index];
 
 		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
@@ -238,7 +239,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	if(lpar_rc != H_SUCCESS) {
 		pool->free_map[free_index] = index;
 		pool->skbuff[index] = NULL;
-		pool->consumer_index--;
+		if (pool->consumer_index == 0)
+			pool->consumer_index = pool->size - 1;
+		else
+			pool->consumer_index--;
 		dma_unmap_single(&adapter->vdev->dev,
 				pool->dma_addr[index], pool->buff_size,
 				DMA_FROM_DEVICE);
@@ -326,6 +330,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
 			 DMA_FROM_DEVICE);
 
 	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
+	adapter->rx_buff_pool[pool].producer_index = free_index;
 	adapter->rx_buff_pool[pool].free_map[free_index] = index;
 
 	mb();
@@ -437,6 +442,31 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 					&adapter->rx_buff_pool[i]);
 }
 
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+		union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+	int rc, try_again = 1;
+
+	/* After a kexec the adapter will still be open, so our attempt to
+	 * open it will fail. So if we get a failure we free the adapter and
+	 * try again, but only once. */
+retry:
+	rc = h_register_logical_lan(adapter->vdev->unit_address,
+				    adapter->buffer_list_dma, rxq_desc.desc,
+				    adapter->filter_list_dma, mac_address);
+
+	if (rc != H_SUCCESS && try_again) {
+		do {
+			rc = h_free_logical_lan(adapter->vdev->unit_address);
+		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+		try_again = 0;
+		goto retry;
+	}
+
+	return rc;
+}
+
 static int ibmveth_open(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev->priv;
@@ -502,12 +532,9 @@ static int ibmveth_open(struct net_device *netdev)
 	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
 	ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
 
+	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
-	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
-					 adapter->buffer_list_dma,
-					 rxq_desc.desc,
-					 adapter->filter_list_dma,
-					 mac_address);
+	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 
 	if(lpar_rc != H_SUCCESS) {
 		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
@@ -905,6 +932,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	return -EINVAL;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+	ibmveth_replenish_task(dev->priv);
+	ibmveth_interrupt(dev->irq, dev, NULL);
+}
+#endif
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
@@ -977,6 +1012,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	netdev->ethtool_ops = &netdev_ethtool_ops;
 	netdev->change_mtu = ibmveth_change_mtu;
 	SET_NETDEV_DEV(netdev, &dev->dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = ibmveth_poll_controller;
+#endif
 	netdev->features |= NETIF_F_LLTX;
 	spin_lock_init(&adapter->stats_lock);
 
@@ -1132,7 +1170,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 {
 	struct proc_dir_entry *entry;
 	if (ibmveth_proc_dir) {
-		entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
+		char u_addr[10];
+		sprintf(u_addr, "%x", adapter->vdev->unit_address);
+		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
 		if (!entry) {
 			ibmveth_error_printk("Cannot create adapter proc entry");
 		} else {
@@ -1147,7 +1187,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
 {
 	if (ibmveth_proc_dir) {
-		remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
+		char u_addr[10];
+		sprintf(u_addr, "%x", adapter->vdev->unit_address);
+		remove_proc_entry(u_addr, ibmveth_proc_dir);
 	}
 }
 
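The ibmveth consumer_index/producer_index changes keep the ring indices bounded in [0, size) instead of letting a free-running counter be reduced modulo the pool size only at the point of use: once the counter overflows, the modulo sequence jumps unless the pool size happens to divide the counter's range, which scrambles the free map. A small userspace illustration of that discontinuity; the pool size below is made up:

#include <stdio.h>
#include <stdint.h>

#define POOL_SIZE 768	/* illustrative; not a power of two */

int main(void)
{
	uint32_t free_running = UINT32_MAX - 1;	/* counter about to overflow */
	uint32_t wrapped = free_running % POOL_SIZE;

	for (int i = 0; i < 4; i++) {
		printf("free-running %% size = %u   wrapped-in-place = %u\n",
		       free_running % POOL_SIZE, wrapped);
		free_running++;				/* old scheme: unbounded counter */
		wrapped = (wrapped + 1) % POOL_SIZE;	/* fixed scheme: wrap as you go */
	}
	/* The first column jumps from 255 back to 0 at the overflow;
	 * the second continues 256, 257, ... as a ring index should. */
	return 0;
}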
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 2ffa3a59e704..9997081c6dae 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2155,7 +2155,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
 	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
 			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 
 	p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
 	p->good_octets_sent +=
@@ -2164,7 +2164,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
 	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
 			offset <= ETH_MIB_LATE_COLLISION;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 }
 
 /*
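Switching '=' to '+=' in eth_update_mib_counters() matters because the MIB registers on this controller are cleared when read: each read_mib() returns only the traffic since the previous read, so overwriting the software counters throws away everything accumulated so far. A userspace illustration with a simulated read-to-clear register:

#include <stdio.h>
#include <stdint.h>

static uint32_t hw_reg;			/* simulated read-to-clear MIB register */

static uint32_t read_mib(void)
{
	uint32_t v = hw_reg;
	hw_reg = 0;			/* the read clears the hardware counter */
	return v;
}

int main(void)
{
	uint32_t overwritten = 0, accumulated = 0, delta;

	hw_reg += 100;			/* frames counted before the first poll */
	delta = read_mib();
	overwritten = delta;		/* old behaviour: '='  */
	accumulated += delta;		/* fixed behaviour: '+=' */

	hw_reg += 50;			/* frames counted before the second poll */
	delta = read_mib();
	overwritten = delta;
	accumulated += delta;

	printf("overwritten=%u accumulated=%u\n", overwritten, accumulated);
	return 0;			/* prints: overwritten=50 accumulated=150 */
}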
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 459c845d6648..5a5289b7a885 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1907,7 +1907,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
 					       length, PCI_DMA_FROMDEVICE);
 		re->skb->ip_summed = CHECKSUM_NONE;
-		__skb_put(skb, length);
+		skb_put(skb, length);
 	}
 	return skb;
 }
@@ -1970,7 +1970,7 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
 	if (skb_shinfo(skb)->nr_frags)
 		skb_put_frags(skb, hdr_space, length);
 	else
-		skb_put(skb, hdr_space);
+		skb_put(skb, length);
 	return skb;
 }
 
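Two separate one-liners in sky2: receive_copy() now uses skb_put(), the bounds-checked variant of __skb_put(), and receive_new() records the actual received length rather than the reserved header space, so short non-fragmented frames report the correct skb->len. A simplified sketch of the skb_put()/__skb_put() distinction (not the kernel's exact implementation):

/* Both helpers extend the skb data area; skb_put() additionally traps
 * if the caller would write past the end of the allocated buffer. */
unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len  += len;
	return tmp;				/* no overflow check */
}

unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len  += len;
	if (skb->tail > skb->end)		/* would overrun the buffer */
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}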