author     Ben Hutchings <bhutchings@solarflare.com>    2014-02-12 13:58:57 -0500
committer  David S. Miller <davem@davemloft.net>        2014-02-12 17:53:34 -0500
commit     0bdadad16608a69defe0b64745a0a6a2edc8e012
tree       95dbf00c5191880ac9adb3e3e00f14dda6cdb3e9
parent     92d8f766ecce190dc2aa5d1aa9a5f5381e831641
sfc: Replace TSOH_OFFSET with the equivalent NET_IP_ALIGN
If CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined, NET_IP_ALIGN will be
defined as 0, so this macro is redundant.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Shradha Shah <sshah@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
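
For reference, NET_IP_ALIGN's behaviour comes from the generic definition in
include/linux/skbuff.h, which architectures with cheap unaligned loads (x86,
for example) override to 0. A condensed sketch of those definitions, abridged
from memory rather than quoted verbatim:

  /* include/linux/skbuff.h: default of two bytes of padding, so that the
   * IP header following a 14-byte Ethernet header lands on a 4-byte
   * boundary.
   */
  #ifndef NET_IP_ALIGN
  #define NET_IP_ALIGN	2
  #endif

  /* arch/x86/include/asm/processor.h: unaligned accesses are cheap on
   * x86, so no padding is needed.
   */
  #define NET_IP_ALIGN	0

With that override in place, TSOH_OFFSET's #ifdef added nothing over using
NET_IP_ALIGN directly, which is what this patch relies on.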
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 17 ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 75d11fa4eb0a..3aa22cdef380 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * Requires TX checksum offload support.
  */
 
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET 0
-#else
-#define TSOH_OFFSET NET_IP_ALIGN
-#endif
-
 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
 
 /**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
 	EFX_BUG_ON_PARANOID(buffer->flags);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+	if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
 		unsigned index =
 			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
 		struct efx_buffer *page_buf =
 			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
 		unsigned offset =
-			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
 
 		if (unlikely(!page_buf->addr) &&
 		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
 	} else {
 		tx_queue->tso_long_headers++;
 
-		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+		buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
 		if (unlikely(!buffer->heap_buf))
 			return NULL;
-		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+		result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
 		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
 	}
 
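
The surviving context shows how the driver carves fixed-size header slots out
of a page: one slot per two ring descriptors, with NET_IP_ALIGN bytes of
padding at the front of each slot, and a heap fallback for headers too long
to fit. A minimal standalone sketch of that arithmetic follows; the values of
TSOH_STD_SIZE, PAGE_SIZE, the ring mask, and the insert count are made up for
illustration and are not taken from the driver:

  #include <stdio.h>

  #define PAGE_SIZE     4096u
  #define TSOH_STD_SIZE 128u                        /* assumed slot size */
  #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) /* slots per page */
  #define NET_IP_ALIGN  2u   /* 0 on arches with efficient unaligned access */

  int main(void)
  {
  	unsigned insert_count = 77, ptr_mask = 1023; /* hypothetical ring state */

  	/* One header slot per two descriptors, as in the diff context */
  	unsigned index  = (insert_count & ptr_mask) / 2;
  	unsigned page   = index / TSOH_PER_PAGE;
  	unsigned offset = TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;

  	printf("slot %u -> page %u, byte offset %u\n", index, page, offset);

  	/* Only headers of up to TSOH_STD_SIZE - NET_IP_ALIGN bytes fit in a
  	 * slot; anything longer takes the kmalloc() path in the diff above.
  	 */
  	printf("max in-slot header: %u bytes\n", TSOH_STD_SIZE - NET_IP_ALIGN);
  	return 0;
  }

Because the padding is applied both when sizing the slot test and when
computing the slot offset, the same NET_IP_ALIGN constant can replace
TSOH_OFFSET in every occurrence without changing the layout on architectures
where the two already agreed.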