author	Alexander Duyck <alexander.h.duyck@intel.com>	2011-08-26 03:44:05 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-09-20 02:59:14 -0400
commit	6013690699dd8316f4018324a6c2d90377d50d2c (patch)
tree	231af400a337c7025cc19ffcb353697aa38ce88e /drivers/net/ethernet
parent	cd392f5ca976b5ad166acc368c239cce2f0df58a (diff)
igb: Replace E1000_XX_DESC_ADV with IGB_XX_DESC
Since igb only uses advanced descriptors, we might as well use an IGB-specific
define and drop the _ADV suffix from the descriptor declarations. In addition,
this can be further reduced by assuming the macros will be working on pointers,
since that is normally how the Tx descriptors are handled.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
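For illustration only, the following is a minimal, self-contained sketch (not the driver code) of the macro shape before and after this patch. The demo_* names and main() are invented stand-ins; only the (R).desc versus (R)->desc pattern mirrors the real change.

/*
 * Minimal sketch, NOT part of the patch: stand-in types showing why the
 * new IGB_*_DESC macros take a ring pointer instead of a ring lvalue.
 * union demo_rx_desc, struct demo_ring and main() are invented for this
 * example; only the (R).desc -> (R)->desc pattern mirrors the real change.
 */
#include <stdio.h>

union demo_rx_desc {			/* stand-in for union e1000_adv_rx_desc */
	unsigned long long pkt_addr;
};

struct demo_ring {			/* stand-in for struct igb_ring */
	void *desc;			/* descriptor ring memory */
	unsigned int count;
};

/* old style: takes the ring itself, so call sites had to write MACRO(*ring, i) */
#define DEMO_RX_DESC_ADV(R, i)	(&(((union demo_rx_desc *)((R).desc))[i]))

/* new style: takes the ring pointer the caller already holds */
#define DEMO_RX_DESC(R, i)	(&(((union demo_rx_desc *)((R)->desc))[i]))

int main(void)
{
	union demo_rx_desc ring_mem[4] = { { 0 } };
	struct demo_ring ring = { .desc = ring_mem, .count = 4 };
	struct demo_ring *rx_ring = &ring;

	/* before: extra dereference at every call site */
	union demo_rx_desc *old_way = DEMO_RX_DESC_ADV(*rx_ring, 2);

	/* after: pass the pointer directly */
	union demo_rx_desc *new_way = DEMO_RX_DESC(rx_ring, 2);

	printf("same descriptor: %s\n", old_way == new_way ? "yes" : "no");
	return 0;
}

Both forms resolve to the same descriptor address; the new form simply drops the *ring dereference at every call site, matching how the ring pointers are already passed around in the driver.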
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb.h	12
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_ethtool.c	4
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	24
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index b1ca8ea385eb..8607a1d6aa80 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -232,12 +232,12 @@ struct igb_ring {
 
 #define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
 
-#define E1000_RX_DESC_ADV(R, i) \
-	(&(((union e1000_adv_rx_desc *)((R).desc))[i]))
-#define E1000_TX_DESC_ADV(R, i) \
-	(&(((union e1000_adv_tx_desc *)((R).desc))[i]))
-#define E1000_TX_CTXTDESC_ADV(R, i) \
-	(&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
+#define IGB_RX_DESC(R, i) \
+	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+#define IGB_TX_DESC(R, i) \
+	(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+#define IGB_TX_CTXTDESC(R, i) \
+	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
 
 /* igb_desc_unused - calculate if we have unused descriptors */
 static inline int igb_desc_unused(struct igb_ring *ring)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 67eee0a137ad..f231d82cc6cf 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1586,7 +1586,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 	/* initialize next to clean and descriptor values */
 	rx_ntc = rx_ring->next_to_clean;
 	tx_ntc = tx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
 	while (staterr & E1000_RXD_STAT_DD) {
@@ -1617,7 +1617,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 			tx_ntc = 0;
 
 		/* fetch next descriptor */
-		rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9a0cfd669f1b..55d643180bfc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -413,7 +413,7 @@ static void igb_dump(struct igb_adapter *adapter)
413 "leng ntw timestamp bi->skb\n"); 413 "leng ntw timestamp bi->skb\n");
414 414
415 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 415 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
416 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 416 tx_desc = IGB_TX_DESC(tx_ring, i);
417 buffer_info = &tx_ring->buffer_info[i]; 417 buffer_info = &tx_ring->buffer_info[i];
418 u0 = (struct my_u0 *)tx_desc; 418 u0 = (struct my_u0 *)tx_desc;
419 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" 419 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
@@ -494,7 +494,7 @@ rx_ring_summary:
 
 		for (i = 0; i < rx_ring->count; i++) {
 			buffer_info = &rx_ring->buffer_info[i];
-			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+			rx_desc = IGB_RX_DESC(rx_ring, i);
 			u0 = (struct my_u0 *)rx_desc;
 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 			if (staterr & E1000_RXD_STAT_DD) {
@@ -3993,7 +3993,7 @@ static inline int igb_tso(struct igb_ring *tx_ring,
 	i = tx_ring->next_to_use;
 
 	buffer_info = &tx_ring->buffer_info[i];
-	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
+	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
 	/* VLAN MACLEN IPLEN */
 	if (tx_flags & IGB_TX_FLAGS_VLAN)
 		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
@@ -4048,7 +4048,7 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring,
 	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
 		i = tx_ring->next_to_use;
 		buffer_info = &tx_ring->buffer_info[i];
-		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
+		context_desc = IGB_TX_CTXTDESC(tx_ring, i);
 
 		if (tx_flags & IGB_TX_FLAGS_VLAN)
 			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
@@ -4238,7 +4238,7 @@ static inline void igb_tx_queue(struct igb_ring *tx_ring,
 
 	do {
 		buffer_info = &tx_ring->buffer_info[i];
-		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+		tx_desc = IGB_TX_DESC(tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | buffer_info->length);
@@ -5580,13 +5580,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->buffer_info[i].next_to_watch;
-	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+	eop_desc = IGB_TX_DESC(tx_ring, eop);
 
 	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
 	       (count < tx_ring->count)) {
 		rmb();	/* read buffer_info after eop_desc status */
 		for (cleaned = false; !cleaned; count++) {
-			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+			tx_desc = IGB_TX_DESC(tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);
 
@@ -5605,7 +5605,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 				i = 0;
 		}
 		eop = tx_ring->buffer_info[i].next_to_watch;
-		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+		eop_desc = IGB_TX_DESC(tx_ring, eop);
 	}
 
 	tx_ring->next_to_clean = i;
@@ -5760,7 +5760,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 	u16 cleaned_count = igb_desc_unused(rx_ring);
 	u16 i = rx_ring->next_to_clean;
 
-	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+	rx_desc = IGB_RX_DESC(rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
 	while (staterr & E1000_RXD_STAT_DD) {
@@ -5775,7 +5775,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 		if (i == rx_ring->count)
 			i = 0;
 
-		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
+		next_rxd = IGB_RX_DESC(rx_ring, i);
 		prefetch(next_rxd);
 
 		/*
@@ -5955,7 +5955,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 	struct igb_buffer *bi;
 	u16 i = rx_ring->next_to_use;
 
-	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+	rx_desc = IGB_RX_DESC(rx_ring, i);
 	bi = &rx_ring->buffer_info[i];
 	i -= rx_ring->count;
 
@@ -5976,7 +5976,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		bi++;
 		i++;
 		if (unlikely(!i)) {
-			rx_desc = E1000_RX_DESC_ADV(*rx_ring, 0);
+			rx_desc = IGB_RX_DESC(rx_ring, 0);
 			bi = rx_ring->buffer_info;
 			i -= rx_ring->count;
 		}