author    Francois Romieu <romieu@fr.zoreil.com>   2006-12-11 17:47:00 -0500
committer Jeff Garzik <jeff@garzik.org>            2007-02-05 16:58:43 -0500
commit    356bd1460d1e1c4e433e4114fdac02139bddf17c (patch)
tree      677c17fddfb7c66f29134c33f64463f15fc43824 /drivers/net/chelsio/sge.c
parent    b7d58394e65c7d90486026614a6ae26d82dd7756 (diff)
chelsio: spaces, tabs and friends

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Diffstat (limited to 'drivers/net/chelsio/sge.c')
-rw-r--r--  drivers/net/chelsio/sge.c  125
1 file changed, 64 insertions(+), 61 deletions(-)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 659cb2252e44..6b1e857ee07e 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -195,7 +195,7 @@ struct cmdQ {
 	struct cmdQ_e  *entries;        /* HW command descriptor Q */
 	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
 	dma_addr_t     dma_addr;        /* DMA addr HW command descriptor Q */
-	spinlock_t     lock;            /* Lock to protect cmdQ enqueuing */
+	spinlock_t     lock;            /* Lock to protect cmdQ enqueuing */
 };
 
 struct freelQ {
@@ -241,9 +241,9 @@ struct sched_port {
 /* Per T204 device */
 struct sched {
 	ktime_t         last_updated;   /* last time quotas were computed */
-	unsigned int    max_avail;      /* max bits to be sent to any port */
-	unsigned int    port;           /* port index (round robin ports) */
-	unsigned int    num;            /* num skbs in per port queues */
+	unsigned int    max_avail;      /* max bits to be sent to any port */
+	unsigned int    port;           /* port index (round robin ports) */
+	unsigned int    num;            /* num skbs in per port queues */
 	struct sched_port p[MAX_NPORTS];
 	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
 };
@@ -259,10 +259,10 @@ static void restart_sched(unsigned long);
  * contention.
  */
 struct sge {
-	struct adapter *adapter;        /* adapter backpointer */
-	struct net_device *netdev;      /* netdevice backpointer */
-	struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
-	struct respQ respQ;             /* response Q */
+	struct adapter *adapter;        /* adapter backpointer */
+	struct net_device *netdev;      /* netdevice backpointer */
+	struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
+	struct respQ respQ;             /* response Q */
 	unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
 	unsigned int rx_pkt_pad;        /* RX padding for L2 packets */
 	unsigned int jumbo_fl;          /* jumbo freelist Q index */
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 	if (credits < MAX_SKB_FRAGS + 1)
 		goto out;
 
- again:
+again:
 	for (i = 0; i < MAX_NPORTS; i++) {
 		s->port = ++s->port & (MAX_NPORTS - 1);
 		skbq = &s->p[s->port].skbq;
@@ -483,8 +483,8 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 	if (update-- && sched_update_avail(sge))
 		goto again;
 
- out:
-	/* If there are more pending skbs, we use the hardware to schedule us
+out:
+	/* If there are more pending skbs, we use the hardware to schedule us
 	 * again.
 	 */
 	if (s->num && !skb) {
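
Note on the two sched_skb() hunks: the scheduler walks the per-port queues round-robin, and the wrap-around relies on MAX_NPORTS being a power of two so a mask can replace a modulo. A minimal sketch of the idiom, with standalone names rather than the driver's:

	/* Round-robin index update without a division; correct only
	 * because NPORTS is a power of two.
	 */
	#define NPORTS 4

	static unsigned int next_port(unsigned int cur)
	{
		return (cur + 1) & (NPORTS - 1);    /* == (cur + 1) % NPORTS */
	}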
@@ -641,14 +641,14 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
 			if (likely(pci_unmap_len(ce, dma_len))) {
 				pci_unmap_single(pdev,
 						 pci_unmap_addr(ce, dma_addr),
-						 pci_unmap_len(ce, dma_len),
-						 PCI_DMA_TODEVICE);
-				q->sop = 0;
-			}
-		} else {
-			if (likely(pci_unmap_len(ce, dma_len))) {
-				pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
-					       pci_unmap_len(ce, dma_len),
+						 pci_unmap_len(ce, dma_len),
+						 PCI_DMA_TODEVICE);
+				q->sop = 0;
+			}
+		} else {
+			if (likely(pci_unmap_len(ce, dma_len))) {
+				pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
+					       pci_unmap_len(ce, dma_len),
 					       PCI_DMA_TODEVICE);
 			}
 		}
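
Note: free_cmdQ_buffers() can only unmap here because the TX path recorded each DMA mapping when it was made. A hedged sketch of that era's PCI DMA bookkeeping pattern: the struct fields mirror the driver, but the helper itself is illustrative only.

	struct cmdQ_ce {
		struct sk_buff *skb;
		DECLARE_PCI_UNMAP_ADDR(dma_addr);  /* compiles away if unmap is a no-op */
		DECLARE_PCI_UNMAP_LEN(dma_len);
	};

	/* illustrative only: stash a single-buffer mapping for a later unmap */
	static void record_mapping(struct cmdQ_ce *ce, struct sk_buff *skb,
				   dma_addr_t mapping, unsigned int len)
	{
		ce->skb = skb;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, len);
	}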
@@ -770,7 +770,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
 static void configure_sge(struct sge *sge, struct sge_params *p)
 {
 	struct adapter *ap = sge->adapter;
-	
+
 	writel(0, ap->regs + A_SG_CONTROL);
 	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
 			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
@@ -850,7 +850,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 	struct freelQ_e *e = &q->entries[q->pidx];
 	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
 
-
 	while (q->credits < q->size) {
 		struct sk_buff *skb;
 		dma_addr_t mapping;
@@ -881,7 +880,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 		}
 		q->credits++;
 	}
-
 }
 
 /*
@@ -1075,12 +1073,12 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
 		skb_put(skb, len);
 		pci_dma_sync_single_for_cpu(pdev,
 					    pci_unmap_addr(ce, dma_addr),
-					    pci_unmap_len(ce, dma_len),
-					    PCI_DMA_FROMDEVICE);
-		memcpy(skb->data, ce->skb->data + dma_pad, len);
-		pci_dma_sync_single_for_device(pdev,
-					       pci_unmap_addr(ce, dma_addr),
-					       pci_unmap_len(ce, dma_len),
+					    pci_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+		memcpy(skb->data, ce->skb->data + dma_pad, len);
+		pci_dma_sync_single_for_device(pdev,
+					       pci_unmap_addr(ce, dma_addr),
+					       pci_unmap_len(ce, dma_len),
 					       PCI_DMA_FROMDEVICE);
 	} else if (!drop_thres)
 		goto use_orig_buf;
@@ -1137,6 +1135,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
 static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
 {
 	unsigned int count = 0;
+
 	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
 		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
 		unsigned int i, len = skb->len - skb->data_len;
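
Note: compute_large_page_tx_descs() only matters when PAGE_SIZE exceeds SGE_TX_DESC_MAX_PLEN, i.e. when a single page-sized piece of the skb needs more than one TX descriptor. Assuming it boils down to a per-piece ceiling division, the arithmetic looks like this sketch:

	/* sketch: descriptors needed for one 'len'-byte piece, given a
	 * SGE_TX_DESC_MAX_PLEN-byte limit per descriptor
	 */
	static inline unsigned int descs_for(unsigned int len)
	{
		return (len + SGE_TX_DESC_MAX_PLEN - 1) / SGE_TX_DESC_MAX_PLEN;
	}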
@@ -1343,7 +1342,7 @@ static void restart_sched(unsigned long arg)
 	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
 		unsigned int genbit, pidx, count;
 		count = 1 + skb_shinfo(skb)->nr_frags;
-		count += compute_large_page_tx_descs(skb);
+		count += compute_large_page_tx_descs(skb);
 		q->in_use += count;
 		genbit = q->genbit;
 		pidx = q->pidx;
@@ -1466,11 +1465,11 @@ static void restart_tx_queues(struct sge *sge)
 }
 
 /*
- * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
- * information.
- */
-static unsigned int update_tx_info(struct adapter *adapter,
-				   unsigned int flags,
+ * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
+ * information.
+ */
+static unsigned int update_tx_info(struct adapter *adapter,
+				   unsigned int flags,
 				   unsigned int pr0)
 {
 	struct sge *sge = adapter->sge;
@@ -1513,14 +1512,14 @@ static int process_responses(struct adapter *adapter, int budget)
 	int budget_left = budget;
 	unsigned int flags = 0;
 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
-	
-	
+
+
 	while (likely(budget_left && e->GenerationBit == q->genbit)) {
 		flags |= e->Qsleeping;
-		
+
 		cmdq_processed[0] += e->Cmdq0CreditReturn;
 		cmdq_processed[1] += e->Cmdq1CreditReturn;
-		
+
 		/* We batch updates to the TX side to avoid cacheline
 		 * ping-pong of TX state information on MP where the sender
 		 * might run on a different CPU than this function...
@@ -1569,7 +1568,7 @@ static int process_responses(struct adapter *adapter, int budget)
 		--budget_left;
 	}
 
-	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
 	sge->cmdQ[1].processed += cmdq_processed[1];
 
 	budget -= budget_left;
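
Note on the two hunks above: the comment in process_responses() is the design point worth keeping in mind. Per-entry credit returns are accumulated in the on-stack cmdq_processed[] array and flushed to shared TX state once, after the loop, so the hot path never bounces the TX cacheline. Schematically, where more_responses() and next_response() are hypothetical helpers standing in for the driver's ring walk:

	unsigned int credits[SGE_CMDQ_N] = {0, 0};

	while (more_responses(q)) {
		struct respQ_e *e = next_response(q);

		credits[0] += e->Cmdq0CreditReturn;   /* CPU-local, cheap */
		credits[1] += e->Cmdq1CreditReturn;
	}
	/* one write to shared state, one potential cacheline bounce */
	flags = update_tx_info(adapter, flags, credits[0]);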
@@ -1597,7 +1596,7 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
 
 		cmdq_processed[0] += e->Cmdq0CreditReturn;
 		cmdq_processed[1] += e->Cmdq1CreditReturn;
-		
+
 		e++;
 		if (unlikely(++q->cidx == q->size)) {
 			q->cidx = 0;
@@ -1613,7 +1612,7 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
 		sge->stats.pure_rsps++;
 	} while (e->GenerationBit == q->genbit && !e->DataValid);
 
-	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
 	sge->cmdQ[1].processed += cmdq_processed[1];
 
 	return e->GenerationBit == q->genbit;
@@ -1636,12 +1635,12 @@ int t1_poll(struct net_device *dev, int *budget)
 	if (work_done >= effective_budget)
 		return 1;
 
-	spin_lock_irq(&adapter->async_lock);
-	__netif_rx_complete(dev);
-	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	spin_unlock_irq(&adapter->async_lock);
+	spin_lock_irq(&adapter->async_lock);
+	__netif_rx_complete(dev);
+	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
 
 	return 0;
 }
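
Note: the t1_poll() hunk touches only layout, but the sequence it preserves encodes the old-style (pre-2.6.24) NAPI exit protocol: leave the poll list, ack the consumed response entries, then re-arm the data interrupt, all under async_lock so a concurrent interrupt cannot re-schedule polling halfway through. Restated with comments; these are the same calls as in the hunk, and the ordering is the point:

	spin_lock_irq(&adapter->async_lock);
	__netif_rx_complete(dev);                     /* leave polling mode */
	writel(adapter->sge->respQ.cidx,
	       adapter->regs + A_SG_SLEEPING);        /* ack consumed entries */
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);          /* re-arm the interrupt */
	spin_unlock_irq(&adapter->async_lock);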
@@ -1652,9 +1651,9 @@ int t1_poll(struct net_device *dev, int *budget)
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
-	struct net_device *dev = adapter->sge->netdev;
-	struct sge *sge = adapter->sge;
-	u32 cause;
+	struct net_device *dev = adapter->sge->netdev;
+	struct sge *sge = adapter->sge;
+	u32 cause;
 	int handled = 0;
 
 	cause = readl(adapter->regs + A_PL_CAUSE);
@@ -1662,12 +1661,12 @@ irqreturn_t t1_interrupt(int irq, void *data)
 		return IRQ_NONE;
 
 	spin_lock(&adapter->async_lock);
-	if (cause & F_PL_INTR_SGE_DATA) {
-		struct respQ *q = &adapter->sge->respQ;
-		struct respQ_e *e = &q->entries[q->cidx];
-
-		handled = 1;
-		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+	if (cause & F_PL_INTR_SGE_DATA) {
+		struct respQ *q = &adapter->sge->respQ;
+		struct respQ_e *e = &q->entries[q->cidx];
+
+		handled = 1;
+		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
 		if (e->GenerationBit == q->genbit &&
 		    __netif_rx_schedule_prep(dev)) {
@@ -1796,7 +1795,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 	 * through the scheduler.
 	 */
 	if (sge->tx_sched && !qid && skb->dev) {
- use_sched:
+use_sched:
 		use_sched_skb = 1;
 		/* Note that the scheduler might return a different skb than
 		 * the one passed in.
@@ -1900,7 +1899,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		cpl = (struct cpl_tx_pkt *)hdr;
 	} else {
 		/*
-		 * Packets shorter than ETH_HLEN can break the MAC, drop them
+		 * Packets shorter than ETH_HLEN can break the MAC, drop them
 		 * early. Also, we may get oversized packets because some
 		 * parts of the kernel don't handle our unusual hard_header_len
 		 * right, drop those too.
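
Note: the comment reformatted above describes a real guard in t1_start_xmit(). A minimal form of the short-packet half of that check, hedged; the driver's actual code also handles the oversized case the comment mentions:

	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb_any(skb);     /* a runt would confuse the MAC */
		return NETDEV_TX_OK;        /* swallow it; nothing to retry */
	}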
@@ -1984,9 +1983,9 @@ send:
 	 * then silently discard to avoid leak.
 	 */
 	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
-		dev_kfree_skb_any(skb);
-		ret = NETDEV_TX_OK;
-	}
+		dev_kfree_skb_any(skb);
+		ret = NETDEV_TX_OK;
+	}
 	return ret;
 }
 
@@ -2099,31 +2098,35 @@ static void espibug_workaround_t204(unsigned long data)
 
 	if (adapter->open_device_map & PORT_MASK) {
 		int i;
-		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) {
+
+		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
 			return;
-		}
+
 		for (i = 0; i < nports; i++) {
 			struct sk_buff *skb = sge->espibug_skb[i];
-			if ( (netif_running(adapter->port[i].dev)) &&
-			     !(netif_queue_stopped(adapter->port[i].dev)) &&
-			     (seop[i] && ((seop[i] & 0xfff) == 0)) &&
-			     skb ) {
-				if (!skb->cb[0]) {
-					u8 ch_mac_addr[ETH_ALEN] =
-					    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-					memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-					       ch_mac_addr, ETH_ALEN);
-					memcpy(skb->data + skb->len - 10,
-					       ch_mac_addr, ETH_ALEN);
-					skb->cb[0] = 0xff;
-				}
+
+			if (!netif_running(adapter->port[i].dev) ||
+			    netif_queue_stopped(adapter->port[i].dev) ||
+			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
+				continue;
+
+			if (!skb->cb[0]) {
+				u8 ch_mac_addr[ETH_ALEN] = {
+					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+				};
+
+				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+				       ch_mac_addr, ETH_ALEN);
+				memcpy(skb->data + skb->len - 10,
+				       ch_mac_addr, ETH_ALEN);
+				skb->cb[0] = 0xff;
+			}
 
-				/* bump the reference count to avoid freeing of
-				 * the skb once the DMA has completed.
-				 */
-				skb = skb_get(skb);
-				t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
-			}
+			/* bump the reference count to avoid freeing of
+			 * the skb once the DMA has completed.
+			 */
+			skb = skb_get(skb);
+			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
 		}
 	}
 	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
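
Note: this is the one hunk that reshapes control flow rather than whitespace. The accept-conditions are inverted (De Morgan) into an early 'continue', so the working code un-nests by two levels. The transformation in miniature, with hypothetical predicates standing in for the netif/seop tests:

	/* before: do the work only if everything holds */
	for (i = 0; i < n; i++) {
		if (running(i) && !stopped(i) && ready(i)) {
			do_work(i);
		}
	}

	/* after: reject early, keep the interesting path flat */
	for (i = 0; i < n; i++) {
		if (!running(i) || stopped(i) || !ready(i))
			continue;

		do_work(i);
	}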
@@ -2202,7 +2205,7 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
 		if (adapter->params.nports > 1)
 			sge->espibug_timeout = HZ/100;
 	}
-	
+
 
 	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
 	p->cmdQ_size[1] = SGE_CMDQ1_E_N;