 drivers/net/ixgb/ixgb.h         | 3 ---
 drivers/net/ixgb/ixgb_ethtool.c | 4 ++--
 drivers/net/ixgb/ixgb_main.c    | 4 ++--
 drivers/net/ixgb/ixgb_param.c   | 4 ++--
 4 files changed, 6 insertions(+), 9 deletions(-)
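The conversion below drops the driver-local IXGB_ROUNDUP() macro in favour of the kernel's generic ALIGN() helper. For power-of-two alignments the two produce the same rounded-up value; the practical difference is that IXGB_ROUNDUP() assigned the result back to its first argument, while ALIGN() is a plain expression, which is why every call site now carries an explicit assignment. A rough sketch of the arithmetic (simplified power-of-two form; the driver's own macro is the one removed in the first hunk):

	/* both round i up to the next multiple of a power-of-two size, e.g.
	 * (1001 + 7) & ~7 = 1008 when aligning to a multiple of 8 */
	IXGB_ROUNDUP(i, size)   ->   (i) = (((i) + (size) - 1) & ~((size) - 1))
	i = ALIGN(i, size)      ->   (i) = (((i) + (size) - 1) & ~((size) - 1))

A stand-alone check of the equivalence follows the diff.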
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index cf30a1059ce0..c8e90861f869 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -111,9 +111,6 @@ struct ixgb_adapter;
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGB_RX_BUFFER_WRITE 8	/* Must be power of 2 */
 
-/* only works for sizes that are powers of 2 */
-#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
-
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgb_buffer {
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index d6628bd9590a..afde84868bea 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -577,11 +577,11 @@ ixgb_set_ringparam(struct net_device *netdev,
 
 	rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD);
 	rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD);
-	IXGB_ROUNDUP(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+	rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
 
 	txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD);
 	txdr->count = min(txdr->count,(uint32_t)MAX_TXD);
-	IXGB_ROUNDUP(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+	txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	if(netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index dfde80e54aef..6d2b059371f1 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -685,7 +685,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 	/* round up to nearest 4K */
 
 	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
-	IXGB_ROUNDUP(txdr->size, 4096);
+	txdr->size = ALIGN(txdr->size, 4096);
 
 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
 	if(!txdr->desc) {
@@ -774,7 +774,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 	/* Round up to nearest 4K */
 
 	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
-	IXGB_ROUNDUP(rxdr->size, 4096);
+	rxdr->size = ALIGN(rxdr->size, 4096);
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
 
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index c38ce739e3f7..5d5ddabf4360 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -282,7 +282,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
 		} else {
 			tx_ring->count = opt.def;
 		}
-		IXGB_ROUNDUP(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+		tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
 	}
 	{ /* Receive Descriptor Count */
 		struct ixgb_option opt = {
@@ -301,7 +301,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
 		} else {
 			rx_ring->count = opt.def;
 		}
-		IXGB_ROUNDUP(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+		rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
 	}
 	{ /* Receive Checksum Offload Enable */
 		struct ixgb_option opt = {
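A minimal stand-alone check of the equivalence mentioned above, written as a hypothetical userspace harness with local copies of both forms (MY_ALIGN is a stand-in for the kernel's ALIGN(); none of this is part of the driver):

	#include <assert.h>
	#include <stdint.h>

	/* local copies for illustration only */
	#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
	#define MY_ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		for (uint32_t n = 0; n < 4096; n++) {
			uint32_t a = n, b = n;

			IXGB_ROUNDUP(a, 8);     /* old form: assigns in place    */
			b = MY_ALIGN(b, 8);     /* new form: explicit assignment */

			assert(a == b);                 /* same rounded-up value */
			assert(a % 8 == 0 && a >= n);   /* multiple of 8, never smaller */
		}
		return 0;
	}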