diff options
author | Jesse Brandeburg <jesse.brandeburg@intel.com> | 2008-07-08 18:52:13 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-07-11 01:20:29 -0400 |
commit | 03f83041d836022a17258c2731f6221f248bedcb (patch) | |
tree | ff1e1aba41622705023e879c920258d3fa6811ad /drivers | |
parent | 7490d71a9245fd59e6cd5732cba4d6b744db581a (diff) |
ixgb: format all if( to be if (
This patch is trivial, but because I want everything to be nice and
tidy, I'm updating it.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/ixgb/ixgb_ee.c | 8 | ||||
-rw-r--r-- | drivers/net/ixgb/ixgb_ethtool.c | 68 | ||||
-rw-r--r-- | drivers/net/ixgb/ixgb_hw.c | 13 | ||||
-rw-r--r-- | drivers/net/ixgb/ixgb_main.c | 141 | ||||
-rw-r--r-- | drivers/net/ixgb/ixgb_osdep.h | 2 | ||||
-rw-r--r-- | drivers/net/ixgb/ixgb_param.c | 30 |
6 files changed, 130 insertions, 132 deletions
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index 2f7ed52c7502..ba41d2a54c81 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c | |||
@@ -108,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, | |||
108 | */ | 108 | */ |
109 | eecd_reg &= ~IXGB_EECD_DI; | 109 | eecd_reg &= ~IXGB_EECD_DI; |
110 | 110 | ||
111 | if(data & mask) | 111 | if (data & mask) |
112 | eecd_reg |= IXGB_EECD_DI; | 112 | eecd_reg |= IXGB_EECD_DI; |
113 | 113 | ||
114 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 114 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
@@ -159,7 +159,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw) | |||
159 | eecd_reg = IXGB_READ_REG(hw, EECD); | 159 | eecd_reg = IXGB_READ_REG(hw, EECD); |
160 | 160 | ||
161 | eecd_reg &= ~(IXGB_EECD_DI); | 161 | eecd_reg &= ~(IXGB_EECD_DI); |
162 | if(eecd_reg & IXGB_EECD_DO) | 162 | if (eecd_reg & IXGB_EECD_DO) |
163 | data |= 1; | 163 | data |= 1; |
164 | 164 | ||
165 | ixgb_lower_clock(hw, &eecd_reg); | 165 | ixgb_lower_clock(hw, &eecd_reg); |
@@ -300,7 +300,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw) | |||
300 | for(i = 0; i < 200; i++) { | 300 | for(i = 0; i < 200; i++) { |
301 | eecd_reg = IXGB_READ_REG(hw, EECD); | 301 | eecd_reg = IXGB_READ_REG(hw, EECD); |
302 | 302 | ||
303 | if(eecd_reg & IXGB_EECD_DO) | 303 | if (eecd_reg & IXGB_EECD_DO) |
304 | return (true); | 304 | return (true); |
305 | 305 | ||
306 | udelay(50); | 306 | udelay(50); |
@@ -331,7 +331,7 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) | |||
331 | for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) | 331 | for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) |
332 | checksum += ixgb_read_eeprom(hw, i); | 332 | checksum += ixgb_read_eeprom(hw, i); |
333 | 333 | ||
334 | if(checksum == (u16) EEPROM_SUM) | 334 | if (checksum == (u16) EEPROM_SUM) |
335 | return (true); | 335 | return (true); |
336 | else | 336 | else |
337 | return (false); | 337 | return (false); |
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 8464d8a013b0..7c9b35c677f0 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c | |||
@@ -95,7 +95,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
95 | ecmd->port = PORT_FIBRE; | 95 | ecmd->port = PORT_FIBRE; |
96 | ecmd->transceiver = XCVR_EXTERNAL; | 96 | ecmd->transceiver = XCVR_EXTERNAL; |
97 | 97 | ||
98 | if(netif_carrier_ok(adapter->netdev)) { | 98 | if (netif_carrier_ok(adapter->netdev)) { |
99 | ecmd->speed = SPEED_10000; | 99 | ecmd->speed = SPEED_10000; |
100 | ecmd->duplex = DUPLEX_FULL; | 100 | ecmd->duplex = DUPLEX_FULL; |
101 | } else { | 101 | } else { |
@@ -122,11 +122,11 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
122 | { | 122 | { |
123 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 123 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
124 | 124 | ||
125 | if(ecmd->autoneg == AUTONEG_ENABLE || | 125 | if (ecmd->autoneg == AUTONEG_ENABLE || |
126 | ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) | 126 | ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) |
127 | return -EINVAL; | 127 | return -EINVAL; |
128 | 128 | ||
129 | if(netif_running(adapter->netdev)) { | 129 | if (netif_running(adapter->netdev)) { |
130 | ixgb_down(adapter, true); | 130 | ixgb_down(adapter, true); |
131 | ixgb_reset(adapter); | 131 | ixgb_reset(adapter); |
132 | ixgb_up(adapter); | 132 | ixgb_up(adapter); |
@@ -146,11 +146,11 @@ ixgb_get_pauseparam(struct net_device *netdev, | |||
146 | 146 | ||
147 | pause->autoneg = AUTONEG_DISABLE; | 147 | pause->autoneg = AUTONEG_DISABLE; |
148 | 148 | ||
149 | if(hw->fc.type == ixgb_fc_rx_pause) | 149 | if (hw->fc.type == ixgb_fc_rx_pause) |
150 | pause->rx_pause = 1; | 150 | pause->rx_pause = 1; |
151 | else if(hw->fc.type == ixgb_fc_tx_pause) | 151 | else if (hw->fc.type == ixgb_fc_tx_pause) |
152 | pause->tx_pause = 1; | 152 | pause->tx_pause = 1; |
153 | else if(hw->fc.type == ixgb_fc_full) { | 153 | else if (hw->fc.type == ixgb_fc_full) { |
154 | pause->rx_pause = 1; | 154 | pause->rx_pause = 1; |
155 | pause->tx_pause = 1; | 155 | pause->tx_pause = 1; |
156 | } | 156 | } |
@@ -163,19 +163,19 @@ ixgb_set_pauseparam(struct net_device *netdev, | |||
163 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 163 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
164 | struct ixgb_hw *hw = &adapter->hw; | 164 | struct ixgb_hw *hw = &adapter->hw; |
165 | 165 | ||
166 | if(pause->autoneg == AUTONEG_ENABLE) | 166 | if (pause->autoneg == AUTONEG_ENABLE) |
167 | return -EINVAL; | 167 | return -EINVAL; |
168 | 168 | ||
169 | if(pause->rx_pause && pause->tx_pause) | 169 | if (pause->rx_pause && pause->tx_pause) |
170 | hw->fc.type = ixgb_fc_full; | 170 | hw->fc.type = ixgb_fc_full; |
171 | else if(pause->rx_pause && !pause->tx_pause) | 171 | else if (pause->rx_pause && !pause->tx_pause) |
172 | hw->fc.type = ixgb_fc_rx_pause; | 172 | hw->fc.type = ixgb_fc_rx_pause; |
173 | else if(!pause->rx_pause && pause->tx_pause) | 173 | else if (!pause->rx_pause && pause->tx_pause) |
174 | hw->fc.type = ixgb_fc_tx_pause; | 174 | hw->fc.type = ixgb_fc_tx_pause; |
175 | else if(!pause->rx_pause && !pause->tx_pause) | 175 | else if (!pause->rx_pause && !pause->tx_pause) |
176 | hw->fc.type = ixgb_fc_none; | 176 | hw->fc.type = ixgb_fc_none; |
177 | 177 | ||
178 | if(netif_running(adapter->netdev)) { | 178 | if (netif_running(adapter->netdev)) { |
179 | ixgb_down(adapter, true); | 179 | ixgb_down(adapter, true); |
180 | ixgb_up(adapter); | 180 | ixgb_up(adapter); |
181 | ixgb_set_speed_duplex(netdev); | 181 | ixgb_set_speed_duplex(netdev); |
@@ -200,7 +200,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data) | |||
200 | 200 | ||
201 | adapter->rx_csum = data; | 201 | adapter->rx_csum = data; |
202 | 202 | ||
203 | if(netif_running(netdev)) { | 203 | if (netif_running(netdev)) { |
204 | ixgb_down(adapter, true); | 204 | ixgb_down(adapter, true); |
205 | ixgb_up(adapter); | 205 | ixgb_up(adapter); |
206 | ixgb_set_speed_duplex(netdev); | 206 | ixgb_set_speed_duplex(netdev); |
@@ -229,7 +229,7 @@ ixgb_set_tx_csum(struct net_device *netdev, u32 data) | |||
229 | static int | 229 | static int |
230 | ixgb_set_tso(struct net_device *netdev, u32 data) | 230 | ixgb_set_tso(struct net_device *netdev, u32 data) |
231 | { | 231 | { |
232 | if(data) | 232 | if (data) |
233 | netdev->features |= NETIF_F_TSO; | 233 | netdev->features |= NETIF_F_TSO; |
234 | else | 234 | else |
235 | netdev->features &= ~NETIF_F_TSO; | 235 | netdev->features &= ~NETIF_F_TSO; |
@@ -415,7 +415,7 @@ ixgb_get_eeprom(struct net_device *netdev, | |||
415 | int i, max_len, first_word, last_word; | 415 | int i, max_len, first_word, last_word; |
416 | int ret_val = 0; | 416 | int ret_val = 0; |
417 | 417 | ||
418 | if(eeprom->len == 0) { | 418 | if (eeprom->len == 0) { |
419 | ret_val = -EINVAL; | 419 | ret_val = -EINVAL; |
420 | goto geeprom_error; | 420 | goto geeprom_error; |
421 | } | 421 | } |
@@ -424,12 +424,12 @@ ixgb_get_eeprom(struct net_device *netdev, | |||
424 | 424 | ||
425 | max_len = ixgb_get_eeprom_len(netdev); | 425 | max_len = ixgb_get_eeprom_len(netdev); |
426 | 426 | ||
427 | if(eeprom->offset > eeprom->offset + eeprom->len) { | 427 | if (eeprom->offset > eeprom->offset + eeprom->len) { |
428 | ret_val = -EINVAL; | 428 | ret_val = -EINVAL; |
429 | goto geeprom_error; | 429 | goto geeprom_error; |
430 | } | 430 | } |
431 | 431 | ||
432 | if((eeprom->offset + eeprom->len) > max_len) | 432 | if ((eeprom->offset + eeprom->len) > max_len) |
433 | eeprom->len = (max_len - eeprom->offset); | 433 | eeprom->len = (max_len - eeprom->offset); |
434 | 434 | ||
435 | first_word = eeprom->offset >> 1; | 435 | first_word = eeprom->offset >> 1; |
@@ -437,7 +437,7 @@ ixgb_get_eeprom(struct net_device *netdev, | |||
437 | 437 | ||
438 | eeprom_buff = kmalloc(sizeof(__le16) * | 438 | eeprom_buff = kmalloc(sizeof(__le16) * |
439 | (last_word - first_word + 1), GFP_KERNEL); | 439 | (last_word - first_word + 1), GFP_KERNEL); |
440 | if(!eeprom_buff) | 440 | if (!eeprom_buff) |
441 | return -ENOMEM; | 441 | return -ENOMEM; |
442 | 442 | ||
443 | /* note the eeprom was good because the driver loaded */ | 443 | /* note the eeprom was good because the driver loaded */ |
@@ -464,35 +464,35 @@ ixgb_set_eeprom(struct net_device *netdev, | |||
464 | int max_len, first_word, last_word; | 464 | int max_len, first_word, last_word; |
465 | u16 i; | 465 | u16 i; |
466 | 466 | ||
467 | if(eeprom->len == 0) | 467 | if (eeprom->len == 0) |
468 | return -EINVAL; | 468 | return -EINVAL; |
469 | 469 | ||
470 | if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) | 470 | if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) |
471 | return -EFAULT; | 471 | return -EFAULT; |
472 | 472 | ||
473 | max_len = ixgb_get_eeprom_len(netdev); | 473 | max_len = ixgb_get_eeprom_len(netdev); |
474 | 474 | ||
475 | if(eeprom->offset > eeprom->offset + eeprom->len) | 475 | if (eeprom->offset > eeprom->offset + eeprom->len) |
476 | return -EINVAL; | 476 | return -EINVAL; |
477 | 477 | ||
478 | if((eeprom->offset + eeprom->len) > max_len) | 478 | if ((eeprom->offset + eeprom->len) > max_len) |
479 | eeprom->len = (max_len - eeprom->offset); | 479 | eeprom->len = (max_len - eeprom->offset); |
480 | 480 | ||
481 | first_word = eeprom->offset >> 1; | 481 | first_word = eeprom->offset >> 1; |
482 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | 482 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; |
483 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); | 483 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); |
484 | if(!eeprom_buff) | 484 | if (!eeprom_buff) |
485 | return -ENOMEM; | 485 | return -ENOMEM; |
486 | 486 | ||
487 | ptr = (void *)eeprom_buff; | 487 | ptr = (void *)eeprom_buff; |
488 | 488 | ||
489 | if(eeprom->offset & 1) { | 489 | if (eeprom->offset & 1) { |
490 | /* need read/modify/write of first changed EEPROM word */ | 490 | /* need read/modify/write of first changed EEPROM word */ |
491 | /* only the second byte of the word is being modified */ | 491 | /* only the second byte of the word is being modified */ |
492 | eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); | 492 | eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); |
493 | ptr++; | 493 | ptr++; |
494 | } | 494 | } |
495 | if((eeprom->offset + eeprom->len) & 1) { | 495 | if ((eeprom->offset + eeprom->len) & 1) { |
496 | /* need read/modify/write of last changed EEPROM word */ | 496 | /* need read/modify/write of last changed EEPROM word */ |
497 | /* only the first byte of the word is being modified */ | 497 | /* only the first byte of the word is being modified */ |
498 | eeprom_buff[last_word - first_word] | 498 | eeprom_buff[last_word - first_word] |
@@ -504,7 +504,7 @@ ixgb_set_eeprom(struct net_device *netdev, | |||
504 | ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); | 504 | ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); |
505 | 505 | ||
506 | /* Update the checksum over the first part of the EEPROM if needed */ | 506 | /* Update the checksum over the first part of the EEPROM if needed */ |
507 | if(first_word <= EEPROM_CHECKSUM_REG) | 507 | if (first_word <= EEPROM_CHECKSUM_REG) |
508 | ixgb_update_eeprom_checksum(hw); | 508 | ixgb_update_eeprom_checksum(hw); |
509 | 509 | ||
510 | kfree(eeprom_buff); | 510 | kfree(eeprom_buff); |
@@ -557,10 +557,10 @@ ixgb_set_ringparam(struct net_device *netdev, | |||
557 | tx_old = adapter->tx_ring; | 557 | tx_old = adapter->tx_ring; |
558 | rx_old = adapter->rx_ring; | 558 | rx_old = adapter->rx_ring; |
559 | 559 | ||
560 | if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | 560 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
561 | return -EINVAL; | 561 | return -EINVAL; |
562 | 562 | ||
563 | if(netif_running(adapter->netdev)) | 563 | if (netif_running(adapter->netdev)) |
564 | ixgb_down(adapter, true); | 564 | ixgb_down(adapter, true); |
565 | 565 | ||
566 | rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); | 566 | rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); |
@@ -571,11 +571,11 @@ ixgb_set_ringparam(struct net_device *netdev, | |||
571 | txdr->count = min(txdr->count,(u32)MAX_TXD); | 571 | txdr->count = min(txdr->count,(u32)MAX_TXD); |
572 | txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); | 572 | txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); |
573 | 573 | ||
574 | if(netif_running(adapter->netdev)) { | 574 | if (netif_running(adapter->netdev)) { |
575 | /* Try to get new resources before deleting old */ | 575 | /* Try to get new resources before deleting old */ |
576 | if((err = ixgb_setup_rx_resources(adapter))) | 576 | if ((err = ixgb_setup_rx_resources(adapter))) |
577 | goto err_setup_rx; | 577 | goto err_setup_rx; |
578 | if((err = ixgb_setup_tx_resources(adapter))) | 578 | if ((err = ixgb_setup_tx_resources(adapter))) |
579 | goto err_setup_tx; | 579 | goto err_setup_tx; |
580 | 580 | ||
581 | /* save the new, restore the old in order to free it, | 581 | /* save the new, restore the old in order to free it, |
@@ -589,7 +589,7 @@ ixgb_set_ringparam(struct net_device *netdev, | |||
589 | ixgb_free_tx_resources(adapter); | 589 | ixgb_free_tx_resources(adapter); |
590 | adapter->rx_ring = rx_new; | 590 | adapter->rx_ring = rx_new; |
591 | adapter->tx_ring = tx_new; | 591 | adapter->tx_ring = tx_new; |
592 | if((err = ixgb_up(adapter))) | 592 | if ((err = ixgb_up(adapter))) |
593 | return err; | 593 | return err; |
594 | ixgb_set_speed_duplex(netdev); | 594 | ixgb_set_speed_duplex(netdev); |
595 | } | 595 | } |
@@ -615,7 +615,7 @@ ixgb_led_blink_callback(unsigned long data) | |||
615 | { | 615 | { |
616 | struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; | 616 | struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; |
617 | 617 | ||
618 | if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status)) | 618 | if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status)) |
619 | ixgb_led_off(&adapter->hw); | 619 | ixgb_led_off(&adapter->hw); |
620 | else | 620 | else |
621 | ixgb_led_on(&adapter->hw); | 621 | ixgb_led_on(&adapter->hw); |
@@ -631,7 +631,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data) | |||
631 | if (!data) | 631 | if (!data) |
632 | data = INT_MAX; | 632 | data = INT_MAX; |
633 | 633 | ||
634 | if(!adapter->blink_timer.function) { | 634 | if (!adapter->blink_timer.function) { |
635 | init_timer(&adapter->blink_timer); | 635 | init_timer(&adapter->blink_timer); |
636 | adapter->blink_timer.function = ixgb_led_blink_callback; | 636 | adapter->blink_timer.function = ixgb_led_blink_callback; |
637 | adapter->blink_timer.data = (unsigned long)adapter; | 637 | adapter->blink_timer.data = (unsigned long)adapter; |
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 04d2003e24e1..d023fb59bf15 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c | |||
@@ -125,7 +125,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw) | |||
125 | /* If we are stopped or resetting exit gracefully and wait to be | 125 | /* If we are stopped or resetting exit gracefully and wait to be |
126 | * started again before accessing the hardware. | 126 | * started again before accessing the hardware. |
127 | */ | 127 | */ |
128 | if(hw->adapter_stopped) { | 128 | if (hw->adapter_stopped) { |
129 | DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); | 129 | DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); |
130 | return false; | 130 | return false; |
131 | } | 131 | } |
@@ -482,7 +482,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw, | |||
482 | /* Place this multicast address in the RAR if there is room, * | 482 | /* Place this multicast address in the RAR if there is room, * |
483 | * else put it in the MTA | 483 | * else put it in the MTA |
484 | */ | 484 | */ |
485 | if(rar_used_count < IXGB_RAR_ENTRIES) { | 485 | if (rar_used_count < IXGB_RAR_ENTRIES) { |
486 | ixgb_rar_set(hw, | 486 | ixgb_rar_set(hw, |
487 | mc_addr_list + | 487 | mc_addr_list + |
488 | (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)), | 488 | (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)), |
@@ -719,9 +719,8 @@ ixgb_setup_fc(struct ixgb_hw *hw) | |||
719 | /* Write the new settings */ | 719 | /* Write the new settings */ |
720 | IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); | 720 | IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); |
721 | 721 | ||
722 | if (pap_reg != 0) { | 722 | if (pap_reg != 0) |
723 | IXGB_WRITE_REG(hw, PAP, pap_reg); | 723 | IXGB_WRITE_REG(hw, PAP, pap_reg); |
724 | } | ||
725 | 724 | ||
726 | /* Set the flow control receive threshold registers. Normally, | 725 | /* Set the flow control receive threshold registers. Normally, |
727 | * these registers will be set to a default threshold that may be | 726 | * these registers will be set to a default threshold that may be |
@@ -729,14 +728,14 @@ ixgb_setup_fc(struct ixgb_hw *hw) | |||
729 | * ability to transmit pause frames in not enabled, then these | 728 | * ability to transmit pause frames in not enabled, then these |
730 | * registers will be set to 0. | 729 | * registers will be set to 0. |
731 | */ | 730 | */ |
732 | if(!(hw->fc.type & ixgb_fc_tx_pause)) { | 731 | if (!(hw->fc.type & ixgb_fc_tx_pause)) { |
733 | IXGB_WRITE_REG(hw, FCRTL, 0); | 732 | IXGB_WRITE_REG(hw, FCRTL, 0); |
734 | IXGB_WRITE_REG(hw, FCRTH, 0); | 733 | IXGB_WRITE_REG(hw, FCRTH, 0); |
735 | } else { | 734 | } else { |
736 | /* We need to set up the Receive Threshold high and low water | 735 | /* We need to set up the Receive Threshold high and low water |
737 | * marks as well as (optionally) enabling the transmission of XON | 736 | * marks as well as (optionally) enabling the transmission of XON |
738 | * frames. */ | 737 | * frames. */ |
739 | if(hw->fc.send_xon) { | 738 | if (hw->fc.send_xon) { |
740 | IXGB_WRITE_REG(hw, FCRTL, | 739 | IXGB_WRITE_REG(hw, FCRTL, |
741 | (hw->fc.low_water | IXGB_FCRTL_XONE)); | 740 | (hw->fc.low_water | IXGB_FCRTL_XONE)); |
742 | } else { | 741 | } else { |
@@ -1007,7 +1006,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw) | |||
1007 | DEBUGFUNC("ixgb_clear_hw_cntrs"); | 1006 | DEBUGFUNC("ixgb_clear_hw_cntrs"); |
1008 | 1007 | ||
1009 | /* if we are stopped or resetting exit gracefully */ | 1008 | /* if we are stopped or resetting exit gracefully */ |
1010 | if(hw->adapter_stopped) { | 1009 | if (hw->adapter_stopped) { |
1011 | DEBUGOUT("Exiting because the adapter is stopped!!!\n"); | 1010 | DEBUGOUT("Exiting because the adapter is stopped!!!\n"); |
1012 | return; | 1011 | return; |
1013 | } | 1012 | } |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index fc2cf0edb7e5..f7dda049dd86 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -250,7 +250,7 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
250 | return err; | 250 | return err; |
251 | } | 251 | } |
252 | 252 | ||
253 | if((hw->max_frame_size != max_frame) || | 253 | if ((hw->max_frame_size != max_frame) || |
254 | (hw->max_frame_size != | 254 | (hw->max_frame_size != |
255 | (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { | 255 | (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { |
256 | 256 | ||
@@ -258,11 +258,11 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
258 | 258 | ||
259 | IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); | 259 | IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); |
260 | 260 | ||
261 | if(hw->max_frame_size > | 261 | if (hw->max_frame_size > |
262 | IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { | 262 | IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { |
263 | u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); | 263 | u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); |
264 | 264 | ||
265 | if(!(ctrl0 & IXGB_CTRL0_JFE)) { | 265 | if (!(ctrl0 & IXGB_CTRL0_JFE)) { |
266 | ctrl0 |= IXGB_CTRL0_JFE; | 266 | ctrl0 |= IXGB_CTRL0_JFE; |
267 | IXGB_WRITE_REG(hw, CTRL0, ctrl0); | 267 | IXGB_WRITE_REG(hw, CTRL0, ctrl0); |
268 | } | 268 | } |
@@ -299,7 +299,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) | |||
299 | if (adapter->have_msi) | 299 | if (adapter->have_msi) |
300 | pci_disable_msi(adapter->pdev); | 300 | pci_disable_msi(adapter->pdev); |
301 | 301 | ||
302 | if(kill_watchdog) | 302 | if (kill_watchdog) |
303 | del_timer_sync(&adapter->watchdog_timer); | 303 | del_timer_sync(&adapter->watchdog_timer); |
304 | 304 | ||
305 | adapter->link_speed = 0; | 305 | adapter->link_speed = 0; |
@@ -356,14 +356,14 @@ ixgb_probe(struct pci_dev *pdev, | |||
356 | int i; | 356 | int i; |
357 | int err; | 357 | int err; |
358 | 358 | ||
359 | if((err = pci_enable_device(pdev))) | 359 | if ((err = pci_enable_device(pdev))) |
360 | return err; | 360 | return err; |
361 | 361 | ||
362 | if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && | 362 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && |
363 | !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { | 363 | !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { |
364 | pci_using_dac = 1; | 364 | pci_using_dac = 1; |
365 | } else { | 365 | } else { |
366 | if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || | 366 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || |
367 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { | 367 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { |
368 | printk(KERN_ERR | 368 | printk(KERN_ERR |
369 | "ixgb: No usable DMA configuration, aborting\n"); | 369 | "ixgb: No usable DMA configuration, aborting\n"); |
@@ -372,13 +372,13 @@ ixgb_probe(struct pci_dev *pdev, | |||
372 | pci_using_dac = 0; | 372 | pci_using_dac = 0; |
373 | } | 373 | } |
374 | 374 | ||
375 | if((err = pci_request_regions(pdev, ixgb_driver_name))) | 375 | if ((err = pci_request_regions(pdev, ixgb_driver_name))) |
376 | goto err_request_regions; | 376 | goto err_request_regions; |
377 | 377 | ||
378 | pci_set_master(pdev); | 378 | pci_set_master(pdev); |
379 | 379 | ||
380 | netdev = alloc_etherdev(sizeof(struct ixgb_adapter)); | 380 | netdev = alloc_etherdev(sizeof(struct ixgb_adapter)); |
381 | if(!netdev) { | 381 | if (!netdev) { |
382 | err = -ENOMEM; | 382 | err = -ENOMEM; |
383 | goto err_alloc_etherdev; | 383 | goto err_alloc_etherdev; |
384 | } | 384 | } |
@@ -400,9 +400,9 @@ ixgb_probe(struct pci_dev *pdev, | |||
400 | } | 400 | } |
401 | 401 | ||
402 | for(i = BAR_1; i <= BAR_5; i++) { | 402 | for(i = BAR_1; i <= BAR_5; i++) { |
403 | if(pci_resource_len(pdev, i) == 0) | 403 | if (pci_resource_len(pdev, i) == 0) |
404 | continue; | 404 | continue; |
405 | if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { | 405 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { |
406 | adapter->hw.io_base = pci_resource_start(pdev, i); | 406 | adapter->hw.io_base = pci_resource_start(pdev, i); |
407 | break; | 407 | break; |
408 | } | 408 | } |
@@ -436,7 +436,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
436 | 436 | ||
437 | /* setup the private structure */ | 437 | /* setup the private structure */ |
438 | 438 | ||
439 | if((err = ixgb_sw_init(adapter))) | 439 | if ((err = ixgb_sw_init(adapter))) |
440 | goto err_sw_init; | 440 | goto err_sw_init; |
441 | 441 | ||
442 | netdev->features = NETIF_F_SG | | 442 | netdev->features = NETIF_F_SG | |
@@ -446,12 +446,12 @@ ixgb_probe(struct pci_dev *pdev, | |||
446 | NETIF_F_HW_VLAN_FILTER; | 446 | NETIF_F_HW_VLAN_FILTER; |
447 | netdev->features |= NETIF_F_TSO; | 447 | netdev->features |= NETIF_F_TSO; |
448 | 448 | ||
449 | if(pci_using_dac) | 449 | if (pci_using_dac) |
450 | netdev->features |= NETIF_F_HIGHDMA; | 450 | netdev->features |= NETIF_F_HIGHDMA; |
451 | 451 | ||
452 | /* make sure the EEPROM is good */ | 452 | /* make sure the EEPROM is good */ |
453 | 453 | ||
454 | if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { | 454 | if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { |
455 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); | 455 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); |
456 | err = -EIO; | 456 | err = -EIO; |
457 | goto err_eeprom; | 457 | goto err_eeprom; |
@@ -460,7 +460,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
460 | ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); | 460 | ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); |
461 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); | 461 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); |
462 | 462 | ||
463 | if(!is_valid_ether_addr(netdev->perm_addr)) { | 463 | if (!is_valid_ether_addr(netdev->perm_addr)) { |
464 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); | 464 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); |
465 | err = -EIO; | 465 | err = -EIO; |
466 | goto err_eeprom; | 466 | goto err_eeprom; |
@@ -475,7 +475,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
475 | INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); | 475 | INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); |
476 | 476 | ||
477 | strcpy(netdev->name, "eth%d"); | 477 | strcpy(netdev->name, "eth%d"); |
478 | if((err = register_netdev(netdev))) | 478 | if ((err = register_netdev(netdev))) |
479 | goto err_register; | 479 | goto err_register; |
480 | 480 | ||
481 | /* we're going to reset, so assume we have no link for now */ | 481 | /* we're going to reset, so assume we have no link for now */ |
@@ -558,7 +558,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter) | |||
558 | hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; | 558 | hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; |
559 | adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ | 559 | adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ |
560 | 560 | ||
561 | if((hw->device_id == IXGB_DEVICE_ID_82597EX) | 561 | if ((hw->device_id == IXGB_DEVICE_ID_82597EX) |
562 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) | 562 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) |
563 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) | 563 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) |
564 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) | 564 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) |
@@ -596,15 +596,15 @@ ixgb_open(struct net_device *netdev) | |||
596 | 596 | ||
597 | /* allocate transmit descriptors */ | 597 | /* allocate transmit descriptors */ |
598 | 598 | ||
599 | if((err = ixgb_setup_tx_resources(adapter))) | 599 | if ((err = ixgb_setup_tx_resources(adapter))) |
600 | goto err_setup_tx; | 600 | goto err_setup_tx; |
601 | 601 | ||
602 | /* allocate receive descriptors */ | 602 | /* allocate receive descriptors */ |
603 | 603 | ||
604 | if((err = ixgb_setup_rx_resources(adapter))) | 604 | if ((err = ixgb_setup_rx_resources(adapter))) |
605 | goto err_setup_rx; | 605 | goto err_setup_rx; |
606 | 606 | ||
607 | if((err = ixgb_up(adapter))) | 607 | if ((err = ixgb_up(adapter))) |
608 | goto err_up; | 608 | goto err_up; |
609 | 609 | ||
610 | return 0; | 610 | return 0; |
@@ -660,7 +660,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | |||
660 | 660 | ||
661 | size = sizeof(struct ixgb_buffer) * txdr->count; | 661 | size = sizeof(struct ixgb_buffer) * txdr->count; |
662 | txdr->buffer_info = vmalloc(size); | 662 | txdr->buffer_info = vmalloc(size); |
663 | if(!txdr->buffer_info) { | 663 | if (!txdr->buffer_info) { |
664 | DPRINTK(PROBE, ERR, | 664 | DPRINTK(PROBE, ERR, |
665 | "Unable to allocate transmit descriptor ring memory\n"); | 665 | "Unable to allocate transmit descriptor ring memory\n"); |
666 | return -ENOMEM; | 666 | return -ENOMEM; |
@@ -673,7 +673,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | |||
673 | txdr->size = ALIGN(txdr->size, 4096); | 673 | txdr->size = ALIGN(txdr->size, 4096); |
674 | 674 | ||
675 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | 675 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); |
676 | if(!txdr->desc) { | 676 | if (!txdr->desc) { |
677 | vfree(txdr->buffer_info); | 677 | vfree(txdr->buffer_info); |
678 | DPRINTK(PROBE, ERR, | 678 | DPRINTK(PROBE, ERR, |
679 | "Unable to allocate transmit descriptor memory\n"); | 679 | "Unable to allocate transmit descriptor memory\n"); |
@@ -749,7 +749,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
749 | 749 | ||
750 | size = sizeof(struct ixgb_buffer) * rxdr->count; | 750 | size = sizeof(struct ixgb_buffer) * rxdr->count; |
751 | rxdr->buffer_info = vmalloc(size); | 751 | rxdr->buffer_info = vmalloc(size); |
752 | if(!rxdr->buffer_info) { | 752 | if (!rxdr->buffer_info) { |
753 | DPRINTK(PROBE, ERR, | 753 | DPRINTK(PROBE, ERR, |
754 | "Unable to allocate receive descriptor ring\n"); | 754 | "Unable to allocate receive descriptor ring\n"); |
755 | return -ENOMEM; | 755 | return -ENOMEM; |
@@ -763,7 +763,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
763 | 763 | ||
764 | rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); | 764 | rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); |
765 | 765 | ||
766 | if(!rxdr->desc) { | 766 | if (!rxdr->desc) { |
767 | vfree(rxdr->buffer_info); | 767 | vfree(rxdr->buffer_info); |
768 | DPRINTK(PROBE, ERR, | 768 | DPRINTK(PROBE, ERR, |
769 | "Unable to allocate receive descriptors\n"); | 769 | "Unable to allocate receive descriptors\n"); |
@@ -984,7 +984,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter) | |||
984 | 984 | ||
985 | for(i = 0; i < rx_ring->count; i++) { | 985 | for(i = 0; i < rx_ring->count; i++) { |
986 | buffer_info = &rx_ring->buffer_info[i]; | 986 | buffer_info = &rx_ring->buffer_info[i]; |
987 | if(buffer_info->skb) { | 987 | if (buffer_info->skb) { |
988 | 988 | ||
989 | pci_unmap_single(pdev, | 989 | pci_unmap_single(pdev, |
990 | buffer_info->dma, | 990 | buffer_info->dma, |
@@ -1025,7 +1025,7 @@ ixgb_set_mac(struct net_device *netdev, void *p) | |||
1025 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 1025 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
1026 | struct sockaddr *addr = p; | 1026 | struct sockaddr *addr = p; |
1027 | 1027 | ||
1028 | if(!is_valid_ether_addr(addr->sa_data)) | 1028 | if (!is_valid_ether_addr(addr->sa_data)) |
1029 | return -EADDRNOTAVAIL; | 1029 | return -EADDRNOTAVAIL; |
1030 | 1030 | ||
1031 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 1031 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
@@ -1058,16 +1058,16 @@ ixgb_set_multi(struct net_device *netdev) | |||
1058 | 1058 | ||
1059 | rctl = IXGB_READ_REG(hw, RCTL); | 1059 | rctl = IXGB_READ_REG(hw, RCTL); |
1060 | 1060 | ||
1061 | if(netdev->flags & IFF_PROMISC) { | 1061 | if (netdev->flags & IFF_PROMISC) { |
1062 | rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); | 1062 | rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); |
1063 | } else if(netdev->flags & IFF_ALLMULTI) { | 1063 | } else if (netdev->flags & IFF_ALLMULTI) { |
1064 | rctl |= IXGB_RCTL_MPE; | 1064 | rctl |= IXGB_RCTL_MPE; |
1065 | rctl &= ~IXGB_RCTL_UPE; | 1065 | rctl &= ~IXGB_RCTL_UPE; |
1066 | } else { | 1066 | } else { |
1067 | rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); | 1067 | rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { | 1070 | if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { |
1071 | rctl |= IXGB_RCTL_MPE; | 1071 | rctl |= IXGB_RCTL_MPE; |
1072 | IXGB_WRITE_REG(hw, RCTL, rctl); | 1072 | IXGB_WRITE_REG(hw, RCTL, rctl); |
1073 | } else { | 1073 | } else { |
@@ -1104,8 +1104,8 @@ ixgb_watchdog(unsigned long data) | |||
1104 | netif_stop_queue(netdev); | 1104 | netif_stop_queue(netdev); |
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | if(adapter->hw.link_up) { | 1107 | if (adapter->hw.link_up) { |
1108 | if(!netif_carrier_ok(netdev)) { | 1108 | if (!netif_carrier_ok(netdev)) { |
1109 | DPRINTK(LINK, INFO, | 1109 | DPRINTK(LINK, INFO, |
1110 | "NIC Link is Up 10000 Mbps Full Duplex\n"); | 1110 | "NIC Link is Up 10000 Mbps Full Duplex\n"); |
1111 | adapter->link_speed = 10000; | 1111 | adapter->link_speed = 10000; |
@@ -1114,7 +1114,7 @@ ixgb_watchdog(unsigned long data) | |||
1114 | netif_wake_queue(netdev); | 1114 | netif_wake_queue(netdev); |
1115 | } | 1115 | } |
1116 | } else { | 1116 | } else { |
1117 | if(netif_carrier_ok(netdev)) { | 1117 | if (netif_carrier_ok(netdev)) { |
1118 | adapter->link_speed = 0; | 1118 | adapter->link_speed = 0; |
1119 | adapter->link_duplex = 0; | 1119 | adapter->link_duplex = 0; |
1120 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); | 1120 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); |
@@ -1126,8 +1126,8 @@ ixgb_watchdog(unsigned long data) | |||
1126 | 1126 | ||
1127 | ixgb_update_stats(adapter); | 1127 | ixgb_update_stats(adapter); |
1128 | 1128 | ||
1129 | if(!netif_carrier_ok(netdev)) { | 1129 | if (!netif_carrier_ok(netdev)) { |
1130 | if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { | 1130 | if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { |
1131 | /* We've lost link, so the controller stops DMA, | 1131 | /* We've lost link, so the controller stops DMA, |
1132 | * but we've got queued Tx work that's never going | 1132 | * but we've got queued Tx work that's never going |
1133 | * to get done, so reset controller to flush Tx. | 1133 | * to get done, so reset controller to flush Tx. |
@@ -1207,7 +1207,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1207 | | (skb->len - (hdr_len))); | 1207 | | (skb->len - (hdr_len))); |
1208 | 1208 | ||
1209 | 1209 | ||
1210 | if(++i == adapter->tx_ring.count) i = 0; | 1210 | if (++i == adapter->tx_ring.count) i = 0; |
1211 | adapter->tx_ring.next_to_use = i; | 1211 | adapter->tx_ring.next_to_use = i; |
1212 | 1212 | ||
1213 | return 1; | 1213 | return 1; |
@@ -1223,7 +1223,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1223 | unsigned int i; | 1223 | unsigned int i; |
1224 | u8 css, cso; | 1224 | u8 css, cso; |
1225 | 1225 | ||
1226 | if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 1226 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
1227 | struct ixgb_buffer *buffer_info; | 1227 | struct ixgb_buffer *buffer_info; |
1228 | css = skb_transport_offset(skb); | 1228 | css = skb_transport_offset(skb); |
1229 | cso = css + skb->csum_offset; | 1229 | cso = css + skb->csum_offset; |
@@ -1245,7 +1245,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1245 | cpu_to_le32(IXGB_CONTEXT_DESC_TYPE | 1245 | cpu_to_le32(IXGB_CONTEXT_DESC_TYPE |
1246 | | IXGB_TX_DESC_CMD_IDE); | 1246 | | IXGB_TX_DESC_CMD_IDE); |
1247 | 1247 | ||
1248 | if(++i == adapter->tx_ring.count) i = 0; | 1248 | if (++i == adapter->tx_ring.count) i = 0; |
1249 | adapter->tx_ring.next_to_use = i; | 1249 | adapter->tx_ring.next_to_use = i; |
1250 | 1250 | ||
1251 | return true; | 1251 | return true; |
@@ -1295,7 +1295,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1295 | len -= size; | 1295 | len -= size; |
1296 | offset += size; | 1296 | offset += size; |
1297 | count++; | 1297 | count++; |
1298 | if(++i == tx_ring->count) i = 0; | 1298 | if (++i == tx_ring->count) i = 0; |
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | for(f = 0; f < nr_frags; f++) { | 1301 | for(f = 0; f < nr_frags; f++) { |
@@ -1328,7 +1328,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1328 | len -= size; | 1328 | len -= size; |
1329 | offset += size; | 1329 | offset += size; |
1330 | count++; | 1330 | count++; |
1331 | if(++i == tx_ring->count) i = 0; | 1331 | if (++i == tx_ring->count) i = 0; |
1332 | } | 1332 | } |
1333 | } | 1333 | } |
1334 | i = (i == 0) ? tx_ring->count - 1 : i - 1; | 1334 | i = (i == 0) ? tx_ring->count - 1 : i - 1; |
@@ -1349,17 +1349,16 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) | |||
1349 | u8 popts = 0; | 1349 | u8 popts = 0; |
1350 | unsigned int i; | 1350 | unsigned int i; |
1351 | 1351 | ||
1352 | if(tx_flags & IXGB_TX_FLAGS_TSO) { | 1352 | if (tx_flags & IXGB_TX_FLAGS_TSO) { |
1353 | cmd_type_len |= IXGB_TX_DESC_CMD_TSE; | 1353 | cmd_type_len |= IXGB_TX_DESC_CMD_TSE; |
1354 | popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); | 1354 | popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); |
1355 | } | 1355 | } |
1356 | 1356 | ||
1357 | if(tx_flags & IXGB_TX_FLAGS_CSUM) | 1357 | if (tx_flags & IXGB_TX_FLAGS_CSUM) |
1358 | popts |= IXGB_TX_DESC_POPTS_TXSM; | 1358 | popts |= IXGB_TX_DESC_POPTS_TXSM; |
1359 | 1359 | ||
1360 | if(tx_flags & IXGB_TX_FLAGS_VLAN) { | 1360 | if (tx_flags & IXGB_TX_FLAGS_VLAN) |
1361 | cmd_type_len |= IXGB_TX_DESC_CMD_VLE; | 1361 | cmd_type_len |= IXGB_TX_DESC_CMD_VLE; |
1362 | } | ||
1363 | 1362 | ||
1364 | i = tx_ring->next_to_use; | 1363 | i = tx_ring->next_to_use; |
1365 | 1364 | ||
@@ -1373,7 +1372,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) | |||
1373 | tx_desc->popts = popts; | 1372 | tx_desc->popts = popts; |
1374 | tx_desc->vlan = cpu_to_le16(vlan_id); | 1373 | tx_desc->vlan = cpu_to_le16(vlan_id); |
1375 | 1374 | ||
1376 | if(++i == tx_ring->count) i = 0; | 1375 | if (++i == tx_ring->count) i = 0; |
1377 | } | 1376 | } |
1378 | 1377 | ||
1379 | tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP | 1378 | tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP |
@@ -1441,7 +1440,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1441 | return NETDEV_TX_OK; | 1440 | return NETDEV_TX_OK; |
1442 | } | 1441 | } |
1443 | 1442 | ||
1444 | if(skb->len <= 0) { | 1443 | if (skb->len <= 0) { |
1445 | dev_kfree_skb_any(skb); | 1444 | dev_kfree_skb_any(skb); |
1446 | return 0; | 1445 | return 0; |
1447 | } | 1446 | } |
@@ -1450,7 +1449,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1450 | DESC_NEEDED))) | 1449 | DESC_NEEDED))) |
1451 | return NETDEV_TX_BUSY; | 1450 | return NETDEV_TX_BUSY; |
1452 | 1451 | ||
1453 | if(adapter->vlgrp && vlan_tx_tag_present(skb)) { | 1452 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { |
1454 | tx_flags |= IXGB_TX_FLAGS_VLAN; | 1453 | tx_flags |= IXGB_TX_FLAGS_VLAN; |
1455 | vlan_id = vlan_tx_tag_get(skb); | 1454 | vlan_id = vlan_tx_tag_get(skb); |
1456 | } | 1455 | } |
@@ -1465,7 +1464,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1465 | 1464 | ||
1466 | if (likely(tso)) | 1465 | if (likely(tso)) |
1467 | tx_flags |= IXGB_TX_FLAGS_TSO; | 1466 | tx_flags |= IXGB_TX_FLAGS_TSO; |
1468 | else if(ixgb_tx_csum(adapter, skb)) | 1467 | else if (ixgb_tx_csum(adapter, skb)) |
1469 | tx_flags |= IXGB_TX_FLAGS_CSUM; | 1468 | tx_flags |= IXGB_TX_FLAGS_CSUM; |
1470 | 1469 | ||
1471 | ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id, | 1470 | ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id, |
@@ -1573,7 +1572,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter) | |||
1573 | if (pci_channel_offline(pdev)) | 1572 | if (pci_channel_offline(pdev)) |
1574 | return; | 1573 | return; |
1575 | 1574 | ||
1576 | if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || | 1575 | if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || |
1577 | (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { | 1576 | (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { |
1578 | u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); | 1577 | u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); |
1579 | u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); | 1578 | u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); |
@@ -1582,7 +1581,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter) | |||
1582 | 1581 | ||
1583 | multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); | 1582 | multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); |
1584 | /* fix up multicast stats by removing broadcasts */ | 1583 | /* fix up multicast stats by removing broadcasts */ |
1585 | if(multi >= bcast) | 1584 | if (multi >= bcast) |
1586 | multi -= bcast; | 1585 | multi -= bcast; |
1587 | 1586 | ||
1588 | adapter->stats.mprcl += (multi & 0xFFFFFFFF); | 1587 | adapter->stats.mprcl += (multi & 0xFFFFFFFF); |
@@ -1706,7 +1705,7 @@ ixgb_intr(int irq, void *data) | |||
1706 | unsigned int i; | 1705 | unsigned int i; |
1707 | #endif | 1706 | #endif |
1708 | 1707 | ||
1709 | if(unlikely(!icr)) | 1708 | if (unlikely(!icr)) |
1710 | return IRQ_NONE; /* Not our interrupt */ | 1709 | return IRQ_NONE; /* Not our interrupt */ |
1711 | 1710 | ||
1712 | if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) | 1711 | if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) |
@@ -1729,7 +1728,7 @@ ixgb_intr(int irq, void *data) | |||
1729 | * transmit queues for completed descriptors, intended to | 1728 | * transmit queues for completed descriptors, intended to |
1730 | * avoid starvation issues and assist tx/rx fairness. */ | 1729 | * avoid starvation issues and assist tx/rx fairness. */ |
1731 | for(i = 0; i < IXGB_MAX_INTR; i++) | 1730 | for(i = 0; i < IXGB_MAX_INTR; i++) |
1732 | if(!ixgb_clean_rx_irq(adapter) & | 1731 | if (!ixgb_clean_rx_irq(adapter) & |
1733 | !ixgb_clean_tx_irq(adapter)) | 1732 | !ixgb_clean_tx_irq(adapter)) |
1734 | break; | 1733 | break; |
1735 | #endif | 1734 | #endif |
@@ -1798,7 +1797,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) | |||
1798 | *(u32 *)&(tx_desc->status) = 0; | 1797 | *(u32 *)&(tx_desc->status) = 0; |
1799 | 1798 | ||
1800 | cleaned = (i == eop); | 1799 | cleaned = (i == eop); |
1801 | if(++i == tx_ring->count) i = 0; | 1800 | if (++i == tx_ring->count) i = 0; |
1802 | } | 1801 | } |
1803 | 1802 | ||
1804 | eop = tx_ring->buffer_info[i].next_to_watch; | 1803 | eop = tx_ring->buffer_info[i].next_to_watch; |
@@ -1820,7 +1819,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) | |||
1820 | } | 1819 | } |
1821 | } | 1820 | } |
1822 | 1821 | ||
1823 | if(adapter->detect_tx_hung) { | 1822 | if (adapter->detect_tx_hung) { |
1824 | /* detect a transmit hang in hardware, this serializes the | 1823 | /* detect a transmit hang in hardware, this serializes the |
1825 | * check with the clearing of time_stamp and movement of i */ | 1824 | * check with the clearing of time_stamp and movement of i */ |
1826 | adapter->detect_tx_hung = false; | 1825 | adapter->detect_tx_hung = false; |
@@ -1869,7 +1868,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, | |||
1869 | /* Ignore Checksum bit is set OR | 1868 | /* Ignore Checksum bit is set OR |
1870 | * TCP Checksum has not been calculated | 1869 | * TCP Checksum has not been calculated |
1871 | */ | 1870 | */ |
1872 | if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || | 1871 | if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || |
1873 | (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { | 1872 | (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { |
1874 | skb->ip_summed = CHECKSUM_NONE; | 1873 | skb->ip_summed = CHECKSUM_NONE; |
1875 | return; | 1874 | return; |
@@ -1877,7 +1876,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, | |||
1877 | 1876 | ||
1878 | /* At this point we know the hardware did the TCP checksum */ | 1877 | /* At this point we know the hardware did the TCP checksum */ |
1879 | /* now look at the TCP checksum error bit */ | 1878 | /* now look at the TCP checksum error bit */ |
1880 | if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { | 1879 | if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { |
1881 | /* let the stack verify checksum errors */ | 1880 | /* let the stack verify checksum errors */ |
1882 | skb->ip_summed = CHECKSUM_NONE; | 1881 | skb->ip_summed = CHECKSUM_NONE; |
1883 | adapter->hw_csum_rx_error++; | 1882 | adapter->hw_csum_rx_error++; |
@@ -1918,7 +1917,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1918 | u8 status; | 1917 | u8 status; |
1919 | 1918 | ||
1920 | #ifdef CONFIG_IXGB_NAPI | 1919 | #ifdef CONFIG_IXGB_NAPI |
1921 | if(*work_done >= work_to_do) | 1920 | if (*work_done >= work_to_do) |
1922 | break; | 1921 | break; |
1923 | 1922 | ||
1924 | (*work_done)++; | 1923 | (*work_done)++; |
@@ -1929,11 +1928,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1929 | 1928 | ||
1930 | prefetch(skb->data); | 1929 | prefetch(skb->data); |
1931 | 1930 | ||
1932 | if(++i == rx_ring->count) i = 0; | 1931 | if (++i == rx_ring->count) i = 0; |
1933 | next_rxd = IXGB_RX_DESC(*rx_ring, i); | 1932 | next_rxd = IXGB_RX_DESC(*rx_ring, i); |
1934 | prefetch(next_rxd); | 1933 | prefetch(next_rxd); |
1935 | 1934 | ||
1936 | if((j = i + 1) == rx_ring->count) j = 0; | 1935 | if ((j = i + 1) == rx_ring->count) j = 0; |
1937 | next2_buffer = &rx_ring->buffer_info[j]; | 1936 | next2_buffer = &rx_ring->buffer_info[j]; |
1938 | prefetch(next2_buffer); | 1937 | prefetch(next2_buffer); |
1939 | 1938 | ||
@@ -1950,7 +1949,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1950 | 1949 | ||
1951 | length = le16_to_cpu(rx_desc->length); | 1950 | length = le16_to_cpu(rx_desc->length); |
1952 | 1951 | ||
1953 | if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { | 1952 | if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { |
1954 | 1953 | ||
1955 | /* All receives must fit into a single buffer */ | 1954 | /* All receives must fit into a single buffer */ |
1956 | 1955 | ||
@@ -1999,14 +1998,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1999 | 1998 | ||
2000 | skb->protocol = eth_type_trans(skb, netdev); | 1999 | skb->protocol = eth_type_trans(skb, netdev); |
2001 | #ifdef CONFIG_IXGB_NAPI | 2000 | #ifdef CONFIG_IXGB_NAPI |
2002 | if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { | 2001 | if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { |
2003 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 2002 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
2004 | le16_to_cpu(rx_desc->special)); | 2003 | le16_to_cpu(rx_desc->special)); |
2005 | } else { | 2004 | } else { |
2006 | netif_receive_skb(skb); | 2005 | netif_receive_skb(skb); |
2007 | } | 2006 | } |
2008 | #else /* CONFIG_IXGB_NAPI */ | 2007 | #else /* CONFIG_IXGB_NAPI */ |
2009 | if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { | 2008 | if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { |
2010 | vlan_hwaccel_rx(skb, adapter->vlgrp, | 2009 | vlan_hwaccel_rx(skb, adapter->vlgrp, |
2011 | le16_to_cpu(rx_desc->special)); | 2010 | le16_to_cpu(rx_desc->special)); |
2012 | } else { | 2011 | } else { |
@@ -2092,7 +2091,7 @@ map_skb: | |||
2092 | rx_desc->status = 0; | 2091 | rx_desc->status = 0; |
2093 | 2092 | ||
2094 | 2093 | ||
2095 | if(++i == rx_ring->count) i = 0; | 2094 | if (++i == rx_ring->count) i = 0; |
2096 | buffer_info = &rx_ring->buffer_info[i]; | 2095 | buffer_info = &rx_ring->buffer_info[i]; |
2097 | } | 2096 | } |
2098 | 2097 | ||
@@ -2125,7 +2124,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
2125 | ixgb_irq_disable(adapter); | 2124 | ixgb_irq_disable(adapter); |
2126 | adapter->vlgrp = grp; | 2125 | adapter->vlgrp = grp; |
2127 | 2126 | ||
2128 | if(grp) { | 2127 | if (grp) { |
2129 | /* enable VLAN tag insert/strip */ | 2128 | /* enable VLAN tag insert/strip */ |
2130 | ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); | 2129 | ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); |
2131 | ctrl |= IXGB_CTRL0_VME; | 2130 | ctrl |= IXGB_CTRL0_VME; |
@@ -2197,10 +2196,10 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) | |||
2197 | { | 2196 | { |
2198 | ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 2197 | ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); |
2199 | 2198 | ||
2200 | if(adapter->vlgrp) { | 2199 | if (adapter->vlgrp) { |
2201 | u16 vid; | 2200 | u16 vid; |
2202 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 2201 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
2203 | if(!vlan_group_get_device(adapter->vlgrp, vid)) | 2202 | if (!vlan_group_get_device(adapter->vlgrp, vid)) |
2204 | continue; | 2203 | continue; |
2205 | ixgb_vlan_rx_add_vid(adapter->netdev, vid); | 2204 | ixgb_vlan_rx_add_vid(adapter->netdev, vid); |
2206 | } | 2205 | } |
@@ -2238,7 +2237,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, | |||
2238 | struct net_device *netdev = pci_get_drvdata(pdev); | 2237 | struct net_device *netdev = pci_get_drvdata(pdev); |
2239 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 2238 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
2240 | 2239 | ||
2241 | if(netif_running(netdev)) | 2240 | if (netif_running(netdev)) |
2242 | ixgb_down(adapter, true); | 2241 | ixgb_down(adapter, true); |
2243 | 2242 | ||
2244 | pci_disable_device(pdev); | 2243 | pci_disable_device(pdev); |
@@ -2261,7 +2260,7 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) | |||
2261 | struct net_device *netdev = pci_get_drvdata(pdev); | 2260 | struct net_device *netdev = pci_get_drvdata(pdev); |
2262 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 2261 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
2263 | 2262 | ||
2264 | if(pci_enable_device(pdev)) { | 2263 | if (pci_enable_device(pdev)) { |
2265 | DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n"); | 2264 | DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n"); |
2266 | return PCI_ERS_RESULT_DISCONNECT; | 2265 | return PCI_ERS_RESULT_DISCONNECT; |
2267 | } | 2266 | } |
@@ -2277,14 +2276,14 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) | |||
2277 | ixgb_reset(adapter); | 2276 | ixgb_reset(adapter); |
2278 | 2277 | ||
2279 | /* Make sure the EEPROM is good */ | 2278 | /* Make sure the EEPROM is good */ |
2280 | if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { | 2279 | if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { |
2281 | DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n"); | 2280 | DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n"); |
2282 | return PCI_ERS_RESULT_DISCONNECT; | 2281 | return PCI_ERS_RESULT_DISCONNECT; |
2283 | } | 2282 | } |
2284 | ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); | 2283 | ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); |
2285 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); | 2284 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); |
2286 | 2285 | ||
2287 | if(!is_valid_ether_addr(netdev->perm_addr)) { | 2286 | if (!is_valid_ether_addr(netdev->perm_addr)) { |
2288 | DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n"); | 2287 | DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n"); |
2289 | return PCI_ERS_RESULT_DISCONNECT; | 2288 | return PCI_ERS_RESULT_DISCONNECT; |
2290 | } | 2289 | } |
@@ -2307,8 +2306,8 @@ static void ixgb_io_resume (struct pci_dev *pdev) | |||
2307 | 2306 | ||
2308 | pci_set_master(pdev); | 2307 | pci_set_master(pdev); |
2309 | 2308 | ||
2310 | if(netif_running(netdev)) { | 2309 | if (netif_running(netdev)) { |
2311 | if(ixgb_up(adapter)) { | 2310 | if (ixgb_up(adapter)) { |
2312 | printk ("ixgb: can't bring device back up after reset\n"); | 2311 | printk ("ixgb: can't bring device back up after reset\n"); |
2313 | return; | 2312 | return; |
2314 | } | 2313 | } |
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h index 4be1b273e1b8..fb74bb122e69 100644 --- a/drivers/net/ixgb/ixgb_osdep.h +++ b/drivers/net/ixgb/ixgb_osdep.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <linux/sched.h> | 40 | #include <linux/sched.h> |
41 | 41 | ||
42 | #undef ASSERT | 42 | #undef ASSERT |
43 | #define ASSERT(x) if(!(x)) BUG() | 43 | #define ASSERT(x) if (!(x)) BUG() |
44 | #define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) | 44 | #define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) |
45 | 45 | ||
46 | #ifdef DBG | 46 | #ifdef DBG |
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index 865d14d6e5a7..a23d2ffc4b7c 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c | |||
@@ -200,7 +200,7 @@ struct ixgb_option { | |||
200 | static int __devinit | 200 | static int __devinit |
201 | ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) | 201 | ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) |
202 | { | 202 | { |
203 | if(*value == OPTION_UNSET) { | 203 | if (*value == OPTION_UNSET) { |
204 | *value = opt->def; | 204 | *value = opt->def; |
205 | return 0; | 205 | return 0; |
206 | } | 206 | } |
@@ -217,7 +217,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) | |||
217 | } | 217 | } |
218 | break; | 218 | break; |
219 | case range_option: | 219 | case range_option: |
220 | if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | 220 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { |
221 | printk(KERN_INFO "%s set to %i\n", opt->name, *value); | 221 | printk(KERN_INFO "%s set to %i\n", opt->name, *value); |
222 | return 0; | 222 | return 0; |
223 | } | 223 | } |
@@ -228,8 +228,8 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) | |||
228 | 228 | ||
229 | for(i = 0; i < opt->arg.l.nr; i++) { | 229 | for(i = 0; i < opt->arg.l.nr; i++) { |
230 | ent = &opt->arg.l.p[i]; | 230 | ent = &opt->arg.l.p[i]; |
231 | if(*value == ent->i) { | 231 | if (*value == ent->i) { |
232 | if(ent->str[0] != '\0') | 232 | if (ent->str[0] != '\0') |
233 | printk(KERN_INFO "%s\n", ent->str); | 233 | printk(KERN_INFO "%s\n", ent->str); |
234 | return 0; | 234 | return 0; |
235 | } | 235 | } |
@@ -260,7 +260,7 @@ void __devinit | |||
260 | ixgb_check_options(struct ixgb_adapter *adapter) | 260 | ixgb_check_options(struct ixgb_adapter *adapter) |
261 | { | 261 | { |
262 | int bd = adapter->bd_number; | 262 | int bd = adapter->bd_number; |
263 | if(bd >= IXGB_MAX_NIC) { | 263 | if (bd >= IXGB_MAX_NIC) { |
264 | printk(KERN_NOTICE | 264 | printk(KERN_NOTICE |
265 | "Warning: no configuration for board #%i\n", bd); | 265 | "Warning: no configuration for board #%i\n", bd); |
266 | printk(KERN_NOTICE "Using defaults for all values\n"); | 266 | printk(KERN_NOTICE "Using defaults for all values\n"); |
@@ -277,7 +277,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
277 | }; | 277 | }; |
278 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | 278 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; |
279 | 279 | ||
280 | if(num_TxDescriptors > bd) { | 280 | if (num_TxDescriptors > bd) { |
281 | tx_ring->count = TxDescriptors[bd]; | 281 | tx_ring->count = TxDescriptors[bd]; |
282 | ixgb_validate_option(&tx_ring->count, &opt); | 282 | ixgb_validate_option(&tx_ring->count, &opt); |
283 | } else { | 283 | } else { |
@@ -296,7 +296,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
296 | }; | 296 | }; |
297 | struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; | 297 | struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; |
298 | 298 | ||
299 | if(num_RxDescriptors > bd) { | 299 | if (num_RxDescriptors > bd) { |
300 | rx_ring->count = RxDescriptors[bd]; | 300 | rx_ring->count = RxDescriptors[bd]; |
301 | ixgb_validate_option(&rx_ring->count, &opt); | 301 | ixgb_validate_option(&rx_ring->count, &opt); |
302 | } else { | 302 | } else { |
@@ -312,7 +312,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
312 | .def = OPTION_ENABLED | 312 | .def = OPTION_ENABLED |
313 | }; | 313 | }; |
314 | 314 | ||
315 | if(num_XsumRX > bd) { | 315 | if (num_XsumRX > bd) { |
316 | unsigned int rx_csum = XsumRX[bd]; | 316 | unsigned int rx_csum = XsumRX[bd]; |
317 | ixgb_validate_option(&rx_csum, &opt); | 317 | ixgb_validate_option(&rx_csum, &opt); |
318 | adapter->rx_csum = rx_csum; | 318 | adapter->rx_csum = rx_csum; |
@@ -338,7 +338,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
338 | .p = fc_list }} | 338 | .p = fc_list }} |
339 | }; | 339 | }; |
340 | 340 | ||
341 | if(num_FlowControl > bd) { | 341 | if (num_FlowControl > bd) { |
342 | unsigned int fc = FlowControl[bd]; | 342 | unsigned int fc = FlowControl[bd]; |
343 | ixgb_validate_option(&fc, &opt); | 343 | ixgb_validate_option(&fc, &opt); |
344 | adapter->hw.fc.type = fc; | 344 | adapter->hw.fc.type = fc; |
@@ -356,7 +356,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
356 | .max = MAX_FCRTH}} | 356 | .max = MAX_FCRTH}} |
357 | }; | 357 | }; |
358 | 358 | ||
359 | if(num_RxFCHighThresh > bd) { | 359 | if (num_RxFCHighThresh > bd) { |
360 | adapter->hw.fc.high_water = RxFCHighThresh[bd]; | 360 | adapter->hw.fc.high_water = RxFCHighThresh[bd]; |
361 | ixgb_validate_option(&adapter->hw.fc.high_water, &opt); | 361 | ixgb_validate_option(&adapter->hw.fc.high_water, &opt); |
362 | } else { | 362 | } else { |
@@ -376,7 +376,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
376 | .max = MAX_FCRTL}} | 376 | .max = MAX_FCRTL}} |
377 | }; | 377 | }; |
378 | 378 | ||
379 | if(num_RxFCLowThresh > bd) { | 379 | if (num_RxFCLowThresh > bd) { |
380 | adapter->hw.fc.low_water = RxFCLowThresh[bd]; | 380 | adapter->hw.fc.low_water = RxFCLowThresh[bd]; |
381 | ixgb_validate_option(&adapter->hw.fc.low_water, &opt); | 381 | ixgb_validate_option(&adapter->hw.fc.low_water, &opt); |
382 | } else { | 382 | } else { |
@@ -396,7 +396,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
396 | .max = MAX_FCPAUSE}} | 396 | .max = MAX_FCPAUSE}} |
397 | }; | 397 | }; |
398 | 398 | ||
399 | if(num_FCReqTimeout > bd) { | 399 | if (num_FCReqTimeout > bd) { |
400 | unsigned int pause_time = FCReqTimeout[bd]; | 400 | unsigned int pause_time = FCReqTimeout[bd]; |
401 | ixgb_validate_option(&pause_time, &opt); | 401 | ixgb_validate_option(&pause_time, &opt); |
402 | adapter->hw.fc.pause_time = pause_time; | 402 | adapter->hw.fc.pause_time = pause_time; |
@@ -429,7 +429,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
429 | .max = MAX_RDTR}} | 429 | .max = MAX_RDTR}} |
430 | }; | 430 | }; |
431 | 431 | ||
432 | if(num_RxIntDelay > bd) { | 432 | if (num_RxIntDelay > bd) { |
433 | adapter->rx_int_delay = RxIntDelay[bd]; | 433 | adapter->rx_int_delay = RxIntDelay[bd]; |
434 | ixgb_validate_option(&adapter->rx_int_delay, &opt); | 434 | ixgb_validate_option(&adapter->rx_int_delay, &opt); |
435 | } else { | 435 | } else { |
@@ -446,7 +446,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
446 | .max = MAX_TIDV}} | 446 | .max = MAX_TIDV}} |
447 | }; | 447 | }; |
448 | 448 | ||
449 | if(num_TxIntDelay > bd) { | 449 | if (num_TxIntDelay > bd) { |
450 | adapter->tx_int_delay = TxIntDelay[bd]; | 450 | adapter->tx_int_delay = TxIntDelay[bd]; |
451 | ixgb_validate_option(&adapter->tx_int_delay, &opt); | 451 | ixgb_validate_option(&adapter->tx_int_delay, &opt); |
452 | } else { | 452 | } else { |
@@ -462,7 +462,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
462 | .def = OPTION_ENABLED | 462 | .def = OPTION_ENABLED |
463 | }; | 463 | }; |
464 | 464 | ||
465 | if(num_IntDelayEnable > bd) { | 465 | if (num_IntDelayEnable > bd) { |
466 | unsigned int ide = IntDelayEnable[bd]; | 466 | unsigned int ide = IntDelayEnable[bd]; |
467 | ixgb_validate_option(&ide, &opt); | 467 | ixgb_validate_option(&ide, &opt); |
468 | adapter->tx_int_delay_enable = ide; | 468 | adapter->tx_int_delay_enable = ide; |