| author | Ben Hutchings <bhutchings@solarflare.com> | 2009-10-23 04:31:20 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2009-10-24 07:27:07 -0400 |
| commit | 63f1988419ccaa544d1d31aadc1dd309f6471ffe (patch) | |
| tree | d973e7c103b9453063d02c8895f629c4ca692a33 /drivers/net/sfc/tx.c | |
| parent | 6d51d307509f98f070688b4bff1d0f7462c4d3ec (diff) | |
sfc: Move all TX DMA length limiting into tx.c
Replace the duplicated logic in efx_enqueue_skb() and
efx_tx_queue_insert() with an inline function, efx_max_tx_len().
Remove the failed attempt at abstracting hardware-specifics and put
all the magic numbers in efx_max_tx_len().
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
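For readers who want to see what the consolidated helper computes, here is a minimal, userspace-only sketch of the same arithmetic. It is not driver code: the function name max_tx_len, the dma_addr_t typedef and the workaround_5391 flag below are stand-ins; the real helper is efx_max_tx_len() in the diff that follows.

```c
/*
 * Userspace sketch of the arithmetic used by efx_max_tx_len() in this
 * patch -- not driver code.  A descriptor is limited to the bytes left
 * before the next 4K boundary; when the hardware bug covered by
 * EFX_WORKAROUND_5391 applies and the address is not 16-byte aligned,
 * the length is clamped further.
 */
#include <stdio.h>

typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

static unsigned max_tx_len(dma_addr_t dma_addr, int workaround_5391)
{
	unsigned len = (~dma_addr & 0xfff) + 1;	/* bytes to the next 4K boundary */

	if (workaround_5391 && (dma_addr & 0xf)) {
		unsigned clamp = 512 - (unsigned)(dma_addr & 0xf);
		if (clamp < len)
			len = clamp;
	}
	return len;
}

int main(void)
{
	printf("%u\n", max_tx_len(0x1000, 0));	/* 4096: starts on a 4K boundary */
	printf("%u\n", max_tx_len(0x1ff0, 0));	/* 16: only 16 bytes left before 0x2000 */
	printf("%u\n", max_tx_len(0x1001, 1));	/* 511: clamped by the 5391 workaround */
	return 0;
}
```

With the helper in place, both callers reduce to dma_len = efx_max_tx_len(efx, dma_addr) followed by a clamp to the remaining buffer length, as the diff below shows.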
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r-- | drivers/net/sfc/tx.c | 37
1 file changed, 23 insertions, 14 deletions
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index ae554eec0563..303919a34df6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -124,6 +124,24 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
 }
 
 
+static inline unsigned
+efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
+{
+	/* Depending on the NIC revision, we can use descriptor
+	 * lengths up to 8K or 8K-1.  However, since PCI Express
+	 * devices must split read requests at 4K boundaries, there is
+	 * little benefit from using descriptors that cross those
+	 * boundaries and we keep things simple by not doing so.
+	 */
+	unsigned len = (~dma_addr & 0xfff) + 1;
+
+	/* Work around hardware bug for unaligned buffers. */
+	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
+		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
+
+	return len;
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
@@ -146,7 +164,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	skb_frag_t *fragment;
 	struct page *page;
 	int page_offset;
-	unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
+	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
 	bool unmap_single;
@@ -223,14 +241,10 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 			EFX_BUG_ON_PARANOID(!buffer->continuation);
 			EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-			dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
-			if (likely(dma_len > len))
+			dma_len = efx_max_tx_len(efx, dma_addr);
+			if (likely(dma_len >= len))
 				dma_len = len;
 
-			misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
-			if (misalign && dma_len + misalign > 512)
-				dma_len = 512 - misalign;
-
 			/* Fill out per descriptor fields */
 			buffer->len = dma_len;
 			buffer->dma_addr = dma_addr;
@@ -703,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, fill_level, insert_ptr, misalign;
+	unsigned dma_len, fill_level, insert_ptr;
 	int q_space;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
@@ -752,12 +766,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 
 		buffer->dma_addr = dma_addr;
 
-		/* Ensure we do not cross a boundary unsupported by H/W */
-		dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
-
-		misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
-		if (misalign && dma_len + misalign > 512)
-			dma_len = 512 - misalign;
-
+		dma_len = efx_max_tx_len(efx, dma_addr);
 
 		/* If there is enough space to send then do so */
 		if (dma_len >= len)
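To illustrate why both callers follow the helper with a clamp to the remaining buffer length (if (dma_len >= len) dma_len = len;), here is a simplified, hypothetical split loop. It is not the driver's actual loop and it omits the 5391 workaround; it only shows how one mapped buffer is broken into descriptors at 4K boundaries.

```c
/* Simplified, hypothetical illustration of splitting one mapped buffer
 * into descriptors at 4K boundaries -- not the driver's actual loop. */
#include <stdio.h>

typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

static unsigned max_tx_len(dma_addr_t dma_addr)
{
	return (~dma_addr & 0xfff) + 1;	/* workaround 5391 omitted for brevity */
}

int main(void)
{
	dma_addr_t dma_addr = 0x1ff0;	/* starts 16 bytes before a 4K boundary */
	unsigned len = 9000;		/* bytes still to be queued */

	while (len) {
		unsigned dma_len = max_tx_len(dma_addr);

		if (dma_len >= len)	/* last descriptor for this buffer */
			dma_len = len;

		printf("descriptor: addr=0x%llx len=%u\n", dma_addr, dma_len);
		dma_addr += dma_len;
		len -= dma_len;
	}
	/* Prints descriptors of 16, 4096, 4096 and 792 bytes. */
	return 0;
}
```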