about summary refs log tree commit diff stats
path: root/drivers/net/sfc/tx.c
diff options
context:
space:
mode:
authorBen Hutchings <bhutchings@solarflare.com>2008-09-01 07:46:50 -0400
committerJeff Garzik <jgarzik@redhat.com>2008-09-03 09:53:45 -0400
commitdc8cfa55da8c21e0b3290c29677a9d05c0a3e595 (patch)
treea4c8bedad12a15d1e7c9fcfc99f873280ca644b4 /drivers/net/sfc/tx.c
parentcc12dac2e512c2b6185ed91899e09e9910630315 (diff)
sfc: Use explicit bool for boolean variables, parameters and return values
Replace (cond ? 1 : 0) with cond or !!cond as appropriate, and (cond ? 0 : 1) with !cond. Remove some redundant boolean temporaries. Rename one field that looks like a flag but isn't.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--	drivers/net/sfc/tx.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5f01371baaf9..51429b6a4dee 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -73,7 +73,7 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
73 pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, 73 pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
74 PCI_DMA_TODEVICE); 74 PCI_DMA_TODEVICE);
75 buffer->unmap_len = 0; 75 buffer->unmap_len = 0;
76 buffer->unmap_single = 0; 76 buffer->unmap_single = false;
77 } 77 }
78 78
79 if (buffer->skb) { 79 if (buffer->skb) {
@@ -150,7 +150,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; 150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
151 dma_addr_t dma_addr, unmap_addr = 0; 151 dma_addr_t dma_addr, unmap_addr = 0;
152 unsigned int dma_len; 152 unsigned int dma_len;
153 unsigned unmap_single; 153 bool unmap_single;
154 int q_space, i = 0; 154 int q_space, i = 0;
155 int rc = NETDEV_TX_OK; 155 int rc = NETDEV_TX_OK;
156 156
@@ -169,7 +169,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
169 * since this is more efficient on machines with sparse 169 * since this is more efficient on machines with sparse
170 * memory. 170 * memory.
171 */ 171 */
172 unmap_single = 1; 172 unmap_single = true;
173 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); 173 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
174 174
175 /* Process all fragments */ 175 /* Process all fragments */
@@ -215,7 +215,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
215 EFX_BUG_ON_PARANOID(buffer->tsoh); 215 EFX_BUG_ON_PARANOID(buffer->tsoh);
216 EFX_BUG_ON_PARANOID(buffer->skb); 216 EFX_BUG_ON_PARANOID(buffer->skb);
217 EFX_BUG_ON_PARANOID(buffer->len); 217 EFX_BUG_ON_PARANOID(buffer->len);
218 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 218 EFX_BUG_ON_PARANOID(!buffer->continuation);
219 EFX_BUG_ON_PARANOID(buffer->unmap_len); 219 EFX_BUG_ON_PARANOID(buffer->unmap_len);
220 220
221 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); 221 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
@@ -248,14 +248,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
248 page_offset = fragment->page_offset; 248 page_offset = fragment->page_offset;
249 i++; 249 i++;
250 /* Map for DMA */ 250 /* Map for DMA */
251 unmap_single = 0; 251 unmap_single = false;
252 dma_addr = pci_map_page(pci_dev, page, page_offset, len, 252 dma_addr = pci_map_page(pci_dev, page, page_offset, len,
253 PCI_DMA_TODEVICE); 253 PCI_DMA_TODEVICE);
254 } 254 }
255 255
256 /* Transfer ownership of the skb to the final buffer */ 256 /* Transfer ownership of the skb to the final buffer */
257 buffer->skb = skb; 257 buffer->skb = skb;
258 buffer->continuation = 0; 258 buffer->continuation = false;
259 259
260 /* Pass off to hardware */ 260 /* Pass off to hardware */
261 falcon_push_buffers(tx_queue); 261 falcon_push_buffers(tx_queue);
@@ -326,7 +326,7 @@ static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
326 } 326 }
327 327
328 efx_dequeue_buffer(tx_queue, buffer); 328 efx_dequeue_buffer(tx_queue, buffer);
329 buffer->continuation = 1; 329 buffer->continuation = true;
330 buffer->len = 0; 330 buffer->len = 0;
331 331
332 ++tx_queue->read_count; 332 ++tx_queue->read_count;
@@ -428,7 +428,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
428 if (!tx_queue->buffer) 428 if (!tx_queue->buffer)
429 return -ENOMEM; 429 return -ENOMEM;
430 for (i = 0; i <= efx->type->txd_ring_mask; ++i) 430 for (i = 0; i <= efx->type->txd_ring_mask; ++i)
431 tx_queue->buffer[i].continuation = 1; 431 tx_queue->buffer[i].continuation = true;
432 432
433 /* Allocate hardware ring */ 433 /* Allocate hardware ring */
434 rc = falcon_probe_tx(tx_queue); 434 rc = falcon_probe_tx(tx_queue);
@@ -469,7 +469,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
469 buffer = &tx_queue->buffer[tx_queue->read_count & 469 buffer = &tx_queue->buffer[tx_queue->read_count &
470 tx_queue->efx->type->txd_ring_mask]; 470 tx_queue->efx->type->txd_ring_mask];
471 efx_dequeue_buffer(tx_queue, buffer); 471 efx_dequeue_buffer(tx_queue, buffer);
472 buffer->continuation = 1; 472 buffer->continuation = true;
473 buffer->len = 0; 473 buffer->len = 0;
474 474
475 ++tx_queue->read_count; 475 ++tx_queue->read_count;
@@ -567,7 +567,7 @@ struct tso_state {
567 /* DMA address and length of the whole fragment */ 567 /* DMA address and length of the whole fragment */
568 unsigned int unmap_len; 568 unsigned int unmap_len;
569 dma_addr_t unmap_addr; 569 dma_addr_t unmap_addr;
570 unsigned int unmap_single; 570 bool unmap_single;
571 } ifc; 571 } ifc;
572 572
573 struct { 573 struct {
@@ -746,7 +746,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
746 EFX_BUG_ON_PARANOID(buffer->len); 746 EFX_BUG_ON_PARANOID(buffer->len);
747 EFX_BUG_ON_PARANOID(buffer->unmap_len); 747 EFX_BUG_ON_PARANOID(buffer->unmap_len);
748 EFX_BUG_ON_PARANOID(buffer->skb); 748 EFX_BUG_ON_PARANOID(buffer->skb);
749 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 749 EFX_BUG_ON_PARANOID(!buffer->continuation);
750 EFX_BUG_ON_PARANOID(buffer->tsoh); 750 EFX_BUG_ON_PARANOID(buffer->tsoh);
751 751
752 buffer->dma_addr = dma_addr; 752 buffer->dma_addr = dma_addr;
@@ -792,7 +792,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
792 EFX_BUG_ON_PARANOID(buffer->len); 792 EFX_BUG_ON_PARANOID(buffer->len);
793 EFX_BUG_ON_PARANOID(buffer->unmap_len); 793 EFX_BUG_ON_PARANOID(buffer->unmap_len);
794 EFX_BUG_ON_PARANOID(buffer->skb); 794 EFX_BUG_ON_PARANOID(buffer->skb);
795 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 795 EFX_BUG_ON_PARANOID(!buffer->continuation);
796 EFX_BUG_ON_PARANOID(buffer->tsoh); 796 EFX_BUG_ON_PARANOID(buffer->tsoh);
797 buffer->len = len; 797 buffer->len = len;
798 buffer->dma_addr = tsoh->dma_addr; 798 buffer->dma_addr = tsoh->dma_addr;
@@ -816,7 +816,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
816 efx_tsoh_free(tx_queue, buffer); 816 efx_tsoh_free(tx_queue, buffer);
817 EFX_BUG_ON_PARANOID(buffer->skb); 817 EFX_BUG_ON_PARANOID(buffer->skb);
818 buffer->len = 0; 818 buffer->len = 0;
819 buffer->continuation = 1; 819 buffer->continuation = true;
820 if (buffer->unmap_len) { 820 if (buffer->unmap_len) {
821 unmap_addr = (buffer->dma_addr + buffer->len - 821 unmap_addr = (buffer->dma_addr + buffer->len -
822 buffer->unmap_len); 822 buffer->unmap_len);
@@ -855,7 +855,7 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
855 st->packet_space = st->p.full_packet_size; 855 st->packet_space = st->p.full_packet_size;
856 st->remaining_len = skb->len - st->p.header_length; 856 st->remaining_len = skb->len - st->p.header_length;
857 st->ifc.unmap_len = 0; 857 st->ifc.unmap_len = 0;
858 st->ifc.unmap_single = 0; 858 st->ifc.unmap_single = false;
859} 859}
860 860
861static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 861static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -865,7 +865,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
865 frag->page_offset, frag->size, 865 frag->page_offset, frag->size,
866 PCI_DMA_TODEVICE); 866 PCI_DMA_TODEVICE);
867 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { 867 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
868 st->ifc.unmap_single = 0; 868 st->ifc.unmap_single = false;
869 st->ifc.unmap_len = frag->size; 869 st->ifc.unmap_len = frag->size;
870 st->ifc.len = frag->size; 870 st->ifc.len = frag->size;
871 st->ifc.dma_addr = st->ifc.unmap_addr; 871 st->ifc.dma_addr = st->ifc.unmap_addr;
@@ -884,7 +884,7 @@ tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
884 st->ifc.unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, 884 st->ifc.unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
885 len, PCI_DMA_TODEVICE); 885 len, PCI_DMA_TODEVICE);
886 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { 886 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
887 st->ifc.unmap_single = 1; 887 st->ifc.unmap_single = true;
888 st->ifc.unmap_len = len; 888 st->ifc.unmap_len = len;
889 st->ifc.len = len; 889 st->ifc.len = len;
890 st->ifc.dma_addr = st->ifc.unmap_addr; 890 st->ifc.dma_addr = st->ifc.unmap_addr;