author | Jesse Brandeburg <jesse.brandeburg@intel.com> | 2008-07-08 18:52:13 -0400
---|---|---
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-07-11 01:20:29 -0400
commit | 03f83041d836022a17258c2731f6221f248bedcb | (patch)
tree | ff1e1aba41622705023e879c920258d3fa6811ad | /drivers/net/ixgb/ixgb_main.c
parent | 7490d71a9245fd59e6cd5732cba4d6b744db581a | (diff)
ixgb: format all if( to be if (
This patch is trivial, but because I want everything to be nice and
tidy, I'm updating it.
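
For the record, the rule being applied is the kernel coding style
(Documentation/CodingStyle): keywords such as if, for, while, and switch
take a space before the opening parenthesis, and scripts/checkpatch.pl
warns about the missing space. A toy before/after sketch of the rule, in
a made-up helper rather than actual driver code:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's link-state reporting.
 * The patch below changes nothing but whitespace: kernel style wants
 * a space between the "if" keyword and its opening parenthesis. */
static void report_link(bool link_up)
{
	if (link_up)	/* was written "if(link_up)" before the cleanup */
		printf("NIC Link is Up 10000 Mbps Full Duplex\n");
	else
		printf("NIC Link is Down\n");
}

int main(void)
{
	report_link(true);
	report_link(false);
	return 0;
}
```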
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/ixgb/ixgb_main.c')
-rw-r--r-- | drivers/net/ixgb/ixgb_main.c | 141
1 file changed, 70 insertions(+), 71 deletions(-)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index fc2cf0edb7e5..f7dda049dd86 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -250,7 +250,7 @@ ixgb_up(struct ixgb_adapter *adapter)
 		return err;
 	}
 
-	if((hw->max_frame_size != max_frame) ||
+	if ((hw->max_frame_size != max_frame) ||
 	   (hw->max_frame_size !=
 	   (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
 
@@ -258,11 +258,11 @@ ixgb_up(struct ixgb_adapter *adapter)
 
 		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
 
-		if(hw->max_frame_size >
+		if (hw->max_frame_size >
 		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
 			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
 
-			if(!(ctrl0 & IXGB_CTRL0_JFE)) {
+			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
 				ctrl0 |= IXGB_CTRL0_JFE;
 				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
 			}
@@ -299,7 +299,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
 	if (adapter->have_msi)
 		pci_disable_msi(adapter->pdev);
 
-	if(kill_watchdog)
+	if (kill_watchdog)
 		del_timer_sync(&adapter->watchdog_timer);
 
 	adapter->link_speed = 0;
@@ -356,14 +356,14 @@ ixgb_probe(struct pci_dev *pdev,
 	int i;
 	int err;
 
-	if((err = pci_enable_device(pdev)))
+	if ((err = pci_enable_device(pdev)))
 		return err;
 
-	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
 	   !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
 		pci_using_dac = 1;
 	} else {
-		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
+		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
 		   (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
 			printk(KERN_ERR
 			       "ixgb: No usable DMA configuration, aborting\n");
@@ -372,13 +372,13 @@ ixgb_probe(struct pci_dev *pdev,
 		pci_using_dac = 0;
 	}
 
-	if((err = pci_request_regions(pdev, ixgb_driver_name)))
+	if ((err = pci_request_regions(pdev, ixgb_driver_name)))
 		goto err_request_regions;
 
 	pci_set_master(pdev);
 
 	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
-	if(!netdev) {
+	if (!netdev) {
 		err = -ENOMEM;
 		goto err_alloc_etherdev;
 	}
@@ -400,9 +400,9 @@ ixgb_probe(struct pci_dev *pdev,
 	}
 
 	for(i = BAR_1; i <= BAR_5; i++) {
-		if(pci_resource_len(pdev, i) == 0)
+		if (pci_resource_len(pdev, i) == 0)
 			continue;
-		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 			adapter->hw.io_base = pci_resource_start(pdev, i);
 			break;
 		}
@@ -436,7 +436,7 @@ ixgb_probe(struct pci_dev *pdev,
 
 	/* setup the private structure */
 
-	if((err = ixgb_sw_init(adapter)))
+	if ((err = ixgb_sw_init(adapter)))
 		goto err_sw_init;
 
 	netdev->features = NETIF_F_SG |
@@ -446,12 +446,12 @@ ixgb_probe(struct pci_dev *pdev,
 			   NETIF_F_HW_VLAN_FILTER;
 	netdev->features |= NETIF_F_TSO;
 
-	if(pci_using_dac)
+	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
 	/* make sure the EEPROM is good */
 
-	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
 		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -460,7 +460,7 @@ ixgb_probe(struct pci_dev *pdev,
 	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 
-	if(!is_valid_ether_addr(netdev->perm_addr)) {
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
 		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -475,7 +475,7 @@ ixgb_probe(struct pci_dev *pdev,
 	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 
 	strcpy(netdev->name, "eth%d");
-	if((err = register_netdev(netdev)))
+	if ((err = register_netdev(netdev)))
 		goto err_register;
 
 	/* we're going to reset, so assume we have no link for now */
@@ -558,7 +558,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
 	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
 	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
 
-	if((hw->device_id == IXGB_DEVICE_ID_82597EX)
+	if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
 	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
 	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
 	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
@@ -596,15 +596,15 @@ ixgb_open(struct net_device *netdev)
 
 	/* allocate transmit descriptors */
 
-	if((err = ixgb_setup_tx_resources(adapter)))
+	if ((err = ixgb_setup_tx_resources(adapter)))
 		goto err_setup_tx;
 
 	/* allocate receive descriptors */
 
-	if((err = ixgb_setup_rx_resources(adapter)))
+	if ((err = ixgb_setup_rx_resources(adapter)))
 		goto err_setup_rx;
 
-	if((err = ixgb_up(adapter)))
+	if ((err = ixgb_up(adapter)))
 		goto err_up;
 
 	return 0;
@@ -660,7 +660,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 
 	size = sizeof(struct ixgb_buffer) * txdr->count;
 	txdr->buffer_info = vmalloc(size);
-	if(!txdr->buffer_info) {
+	if (!txdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
 			"Unable to allocate transmit descriptor ring memory\n");
 		return -ENOMEM;
@@ -673,7 +673,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 	txdr->size = ALIGN(txdr->size, 4096);
 
 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-	if(!txdr->desc) {
+	if (!txdr->desc) {
 		vfree(txdr->buffer_info);
 		DPRINTK(PROBE, ERR,
 			"Unable to allocate transmit descriptor memory\n");
@@ -749,7 +749,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 
 	size = sizeof(struct ixgb_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
-	if(!rxdr->buffer_info) {
+	if (!rxdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
 			"Unable to allocate receive descriptor ring\n");
 		return -ENOMEM;
@@ -763,7 +763,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
 
-	if(!rxdr->desc) {
+	if (!rxdr->desc) {
 		vfree(rxdr->buffer_info);
 		DPRINTK(PROBE, ERR,
 			"Unable to allocate receive descriptors\n");
@@ -984,7 +984,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
 
 	for(i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
-		if(buffer_info->skb) {
+		if (buffer_info->skb) {
 
 			pci_unmap_single(pdev,
 					 buffer_info->dma,
@@ -1025,7 +1025,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
 
-	if(!is_valid_ether_addr(addr->sa_data))
+	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -1058,16 +1058,16 @@ ixgb_set_multi(struct net_device *netdev)
 
 	rctl = IXGB_READ_REG(hw, RCTL);
 
-	if(netdev->flags & IFF_PROMISC) {
+	if (netdev->flags & IFF_PROMISC) {
 		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
-	} else if(netdev->flags & IFF_ALLMULTI) {
+	} else if (netdev->flags & IFF_ALLMULTI) {
 		rctl |= IXGB_RCTL_MPE;
 		rctl &= ~IXGB_RCTL_UPE;
 	} else {
 		rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
 	}
 
-	if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+	if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
 		rctl |= IXGB_RCTL_MPE;
 		IXGB_WRITE_REG(hw, RCTL, rctl);
 	} else {
@@ -1104,8 +1104,8 @@ ixgb_watchdog(unsigned long data)
 		netif_stop_queue(netdev);
 	}
 
-	if(adapter->hw.link_up) {
-		if(!netif_carrier_ok(netdev)) {
+	if (adapter->hw.link_up) {
+		if (!netif_carrier_ok(netdev)) {
 			DPRINTK(LINK, INFO,
 				"NIC Link is Up 10000 Mbps Full Duplex\n");
 			adapter->link_speed = 10000;
@@ -1114,7 +1114,7 @@ ixgb_watchdog(unsigned long data)
 			netif_wake_queue(netdev);
 		}
 	} else {
-		if(netif_carrier_ok(netdev)) {
+		if (netif_carrier_ok(netdev)) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -1126,8 +1126,8 @@ ixgb_watchdog(unsigned long data)
 
 	ixgb_update_stats(adapter);
 
-	if(!netif_carrier_ok(netdev)) {
-		if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
+	if (!netif_carrier_ok(netdev)) {
+		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
@@ -1207,7 +1207,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 			| (skb->len - (hdr_len)));
 
 
-	if(++i == adapter->tx_ring.count) i = 0;
+	if (++i == adapter->tx_ring.count) i = 0;
 	adapter->tx_ring.next_to_use = i;
 
 	return 1;
@@ -1223,7 +1223,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 	unsigned int i;
 	u8 css, cso;
 
-	if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		struct ixgb_buffer *buffer_info;
 		css = skb_transport_offset(skb);
 		cso = css + skb->csum_offset;
@@ -1245,7 +1245,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
 				    | IXGB_TX_DESC_CMD_IDE);
 
-	if(++i == adapter->tx_ring.count) i = 0;
+	if (++i == adapter->tx_ring.count) i = 0;
 	adapter->tx_ring.next_to_use = i;
 
 	return true;
@@ -1295,7 +1295,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		len -= size;
 		offset += size;
 		count++;
-		if(++i == tx_ring->count) i = 0;
+		if (++i == tx_ring->count) i = 0;
 	}
 
 	for(f = 0; f < nr_frags; f++) {
@@ -1328,7 +1328,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 			len -= size;
 			offset += size;
 			count++;
-			if(++i == tx_ring->count) i = 0;
+			if (++i == tx_ring->count) i = 0;
 		}
 	}
 	i = (i == 0) ? tx_ring->count - 1 : i - 1;
@@ -1349,17 +1349,16 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 	u8 popts = 0;
 	unsigned int i;
 
-	if(tx_flags & IXGB_TX_FLAGS_TSO) {
+	if (tx_flags & IXGB_TX_FLAGS_TSO) {
 		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
 		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
 	}
 
-	if(tx_flags & IXGB_TX_FLAGS_CSUM)
+	if (tx_flags & IXGB_TX_FLAGS_CSUM)
 		popts |= IXGB_TX_DESC_POPTS_TXSM;
 
-	if(tx_flags & IXGB_TX_FLAGS_VLAN) {
+	if (tx_flags & IXGB_TX_FLAGS_VLAN)
 		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
-	}
 
 	i = tx_ring->next_to_use;
 
@@ -1373,7 +1372,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 		tx_desc->popts = popts;
 		tx_desc->vlan = cpu_to_le16(vlan_id);
 
-		if(++i == tx_ring->count) i = 0;
+		if (++i == tx_ring->count) i = 0;
 	}
 
 	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
@@ -1441,7 +1440,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
-	if(skb->len <= 0) {
+	if (skb->len <= 0) {
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
@@ -1450,7 +1449,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		     DESC_NEEDED)))
 		return NETDEV_TX_BUSY;
 
-	if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		tx_flags |= IXGB_TX_FLAGS_VLAN;
 		vlan_id = vlan_tx_tag_get(skb);
 	}
@@ -1465,7 +1464,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	if (likely(tso))
 		tx_flags |= IXGB_TX_FLAGS_TSO;
-	else if(ixgb_tx_csum(adapter, skb))
+	else if (ixgb_tx_csum(adapter, skb))
 		tx_flags |= IXGB_TX_FLAGS_CSUM;
 
 	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
@@ -1573,7 +1572,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
 	if (pci_channel_offline(pdev))
 		return;
 
-	if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
+	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
 	   (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
 		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
 		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
@@ -1582,7 +1581,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
 
 		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
 		/* fix up multicast stats by removing broadcasts */
-		if(multi >= bcast)
+		if (multi >= bcast)
 			multi -= bcast;
 
 		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
@@ -1706,7 +1705,7 @@ ixgb_intr(int irq, void *data)
 	unsigned int i;
 #endif
 
-	if(unlikely(!icr))
+	if (unlikely(!icr))
 		return IRQ_NONE; /* Not our interrupt */
 
 	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
@@ -1729,7 +1728,7 @@ ixgb_intr(int irq, void *data)
 	 * transmit queues for completed descriptors, intended to
 	 * avoid starvation issues and assist tx/rx fairness. */
 	for(i = 0; i < IXGB_MAX_INTR; i++)
-		if(!ixgb_clean_rx_irq(adapter) &
+		if (!ixgb_clean_rx_irq(adapter) &
 		   !ixgb_clean_tx_irq(adapter))
 			break;
 #endif
@@ -1798,7 +1797,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 			*(u32 *)&(tx_desc->status) = 0;
 
 			cleaned = (i == eop);
-			if(++i == tx_ring->count) i = 0;
+			if (++i == tx_ring->count) i = 0;
 		}
 
 		eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1820,7 +1819,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 		}
 	}
 
-	if(adapter->detect_tx_hung) {
+	if (adapter->detect_tx_hung) {
 		/* detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		adapter->detect_tx_hung = false;
@@ -1869,7 +1868,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
 	/* Ignore Checksum bit is set OR
 	 * TCP Checksum has not been calculated
 	 */
-	if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
+	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
 	   (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
 		skb->ip_summed = CHECKSUM_NONE;
 		return;
@@ -1877,7 +1876,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
 
 	/* At this point we know the hardware did the TCP checksum */
 	/* now look at the TCP checksum error bit */
-	if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
+	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
 		/* let the stack verify checksum errors */
 		skb->ip_summed = CHECKSUM_NONE;
 		adapter->hw_csum_rx_error++;
@@ -1918,7 +1917,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 		u8 status;
 
 #ifdef CONFIG_IXGB_NAPI
-		if(*work_done >= work_to_do)
+		if (*work_done >= work_to_do)
 			break;
 
 		(*work_done)++;
@@ -1929,11 +1928,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 
 		prefetch(skb->data);
 
-		if(++i == rx_ring->count) i = 0;
+		if (++i == rx_ring->count) i = 0;
 		next_rxd = IXGB_RX_DESC(*rx_ring, i);
 		prefetch(next_rxd);
 
-		if((j = i + 1) == rx_ring->count) j = 0;
+		if ((j = i + 1) == rx_ring->count) j = 0;
 		next2_buffer = &rx_ring->buffer_info[j];
 		prefetch(next2_buffer);
 
@@ -1950,7 +1949,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 
 		length = le16_to_cpu(rx_desc->length);
 
-		if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
+		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
 
 			/* All receives must fit into a single buffer */
 
@@ -1999,14 +1998,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_IXGB_NAPI
-		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
+		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->special));
 		} else {
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_IXGB_NAPI */
-		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
+		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->special));
 		} else {
@@ -2092,7 +2091,7 @@ map_skb:
 		rx_desc->status = 0;
 
 
-		if(++i == rx_ring->count) i = 0;
+		if (++i == rx_ring->count) i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
@@ -2125,7 +2124,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 	ixgb_irq_disable(adapter);
 	adapter->vlgrp = grp;
 
-	if(grp) {
+	if (grp) {
 		/* enable VLAN tag insert/strip */
 		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
 		ctrl |= IXGB_CTRL0_VME;
@@ -2197,10 +2196,10 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 {
 	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 
-	if(adapter->vlgrp) {
+	if (adapter->vlgrp) {
 		u16 vid;
 		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-			if(!vlan_group_get_device(adapter->vlgrp, vid))
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
 			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
 		}
@@ -2238,7 +2237,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-	if(netif_running(netdev))
+	if (netif_running(netdev))
 		ixgb_down(adapter, true);
 
 	pci_disable_device(pdev);
@@ -2261,7 +2260,7 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-	if(pci_enable_device(pdev)) {
+	if (pci_enable_device(pdev)) {
 		DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
@@ -2277,14 +2276,14 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
 	ixgb_reset(adapter);
 
 	/* Make sure the EEPROM is good */
-	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
 		DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 
-	if(!is_valid_ether_addr(netdev->perm_addr)) {
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
 		DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
@@ -2307,8 +2306,8 @@ static void ixgb_io_resume (struct pci_dev *pdev)
 
 	pci_set_master(pdev);
 
-	if(netif_running(netdev)) {
-		if(ixgb_up(adapter)) {
+	if (netif_running(netdev)) {
+		if (ixgb_up(adapter)) {
 			printk ("ixgb: can't bring device back up after reset\n");
 			return;
 		}