author	Ben Hutchings <bhutchings@solarflare.com>	2008-05-16 16:15:49 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-05-22 05:59:26 -0400
commit	b3475645ed8b823c063f7560b243026150d7c3f8 (patch)
tree	e69a067136b0344967c1464f3143222bd3963d9e /drivers
parent	2c118e0f6b7f3b8021df3c80c80c0545402f38b4 (diff)
sfc: Added and removed braces to comply with kernel style
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
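For reference, the brace rules from the kernel's Documentation/CodingStyle that this patch applies are: do not add braces where the body of a loop or conditional is a single simple statement, but do add them when the body is more than a single simple statement (for example, a nested if). A minimal, self-contained C sketch of the rule follows; the struct and helper names in it are invented for illustration and do not appear in the sfc driver.

/*
 * Illustrative sketch only: demo_queue and demo_sync_irq are hypothetical.
 * Only the brace placement mirrors the kernel style rule applied by this patch.
 */
#include <stdio.h>

struct demo_queue {
	int irq;
};

static void demo_sync_irq(int irq)
{
	printf("sync irq %d\n", irq);
}

static void demo(struct demo_queue *queues, int nqueues)
{
	int i;

	/* Body is a single simple statement: no braces. */
	for (i = 0; i < nqueues; i++)
		demo_sync_irq(queues[i].irq);

	/*
	 * Body is more than a single simple statement (a nested if):
	 * braces are required around the loop body.
	 */
	for (i = 0; i < nqueues; i++) {
		if (queues[i].irq)
			demo_sync_irq(queues[i].irq);
	}
}

int main(void)
{
	struct demo_queue queues[2] = { { 5 }, { 0 } };

	demo(queues, 2);
	return 0;
}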
Diffstat (limited to 'drivers')
 -rw-r--r--	drivers/net/sfc/efx.c	6
 -rw-r--r--	drivers/net/sfc/falcon.c	3
 -rw-r--r--	drivers/net/sfc/rx.c	3
 -rw-r--r--	drivers/net/sfc/tx.c	9
4 files changed, 13 insertions, 8 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 3494f4cd314e..df19e86ab2e7 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1060,9 +1060,8 @@ static void efx_flush_all(struct efx_nic *efx)
 	cancel_delayed_work_sync(&efx->monitor_work);
 
 	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx) {
+	efx_for_each_rx_queue(rx_queue, efx)
 		cancel_delayed_work_sync(&rx_queue->work);
-	}
 
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->reconfigure_work);
@@ -1088,9 +1087,10 @@ static void efx_stop_all(struct efx_nic *efx)
 	falcon_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			synchronize_irq(channel->irq);
+	}
 
 	/* Stop all NAPI processing and synchronous rx refills */
 	efx_for_each_channel(channel, efx)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c58f8a3443cc..4f96ce4c3532 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1636,9 +1636,10 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 	efx_oword_t reg;
 
 	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			free_irq(channel->irq, channel);
+	}
 
 	/* ACK legacy interrupt */
 	if (FALCON_REV(efx) >= FALCON_REV_B0)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 670622373ddf..a6413309c577 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -400,9 +400,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 		return 0;
 
 	/* Record minimum fill level */
-	if (unlikely(fill_level < rx_queue->min_fill))
+	if (unlikely(fill_level < rx_queue->min_fill)) {
 		if (fill_level)
 			rx_queue->min_fill = fill_level;
+	}
 
 	/* Acquire RX add lock. If this lock is contended, then a fast
 	 * fill must already be in progress (e.g. in the refill
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 9b436f5b4888..75eb0fd5fd2b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 	base_dma = tsoh->dma_addr & PAGE_MASK;
 
 	p = &tx_queue->tso_headers_free;
-	while (*p != NULL)
+	while (*p != NULL) {
 		if (((unsigned long)*p & PAGE_MASK) == base_kva)
 			*p = (*p)->next;
 		else
 			p = &(*p)->next;
+	}
 
 	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 
 	/* Allocate a DMA-mapped header buffer. */
 	if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
-		if (tx_queue->tso_headers_free == NULL)
+		if (tx_queue->tso_headers_free == NULL) {
 			if (efx_tsoh_block_alloc(tx_queue))
 				return -1;
+		}
 		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
 		tsoh = tx_queue->tso_headers_free;
 		tx_queue->tso_headers_free = tsoh->next;
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 {
 	unsigned i;
 
-	if (tx_queue->buffer)
+	if (tx_queue->buffer) {
 		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+	}
 
 	while (tx_queue->tso_headers_free != NULL)
 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,