Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/adapter.h      4
-rw-r--r--  drivers/net/cxgb3/common.h       4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c  12
-rw-r--r--  drivers/net/cxgb3/sge.c        164
-rw-r--r--  drivers/net/cxgb3/t3_hw.c       80
5 files changed, 197 insertions(+), 67 deletions(-)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 71eaa431371d..714df2b675e6 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,6 +85,8 @@ struct fl_pg_chunk {
 	struct page *page;
 	void *va;
 	unsigned int offset;
+	u64 *p_cnt;
+	DECLARE_PCI_UNMAP_ADDR(mapping);
 };
 
 struct rx_desc;
@@ -101,6 +103,7 @@ struct sge_fl { /* SGE per free-buffer list state */
 	struct fl_pg_chunk pg_chunk;/* page chunk cache */
 	unsigned int use_pages;     /* whether FL uses pages or sk_buffs */
 	unsigned int order;         /* order of page allocations */
+	unsigned int alloc_size;    /* size of allocated buffer */
 	struct rx_desc *desc;       /* address of HW Rx descriptor ring */
 	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
 	dma_addr_t   phys_addr;     /* physical address of HW ring start */
@@ -291,6 +294,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
 
 void t3_sge_start(struct adapter *adap);
 void t3_sge_stop(struct adapter *adap);
+void t3_start_sge_timers(struct adapter *adap);
 void t3_stop_sge_timers(struct adapter *adap);
 void t3_free_sge_resources(struct adapter *adap);
 void t3_sge_err_intr_handler(struct adapter *adapter);
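The two fields added to struct fl_pg_chunk above are the heart of the Rx buffer rework in this series: a page (or order-1 page pair) is DMA-mapped once, carved into chunks, and a reference count kept in the reserved tail of the page itself decides when the single mapping can be released. A minimal userspace model of the scheme (malloc/free stand in for alloc_pages plus pci_map_page/pci_unmap_page; all names here are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define PG_SIZE  4096   /* stands in for PAGE_SIZE << order */
    #define CHUNK_SZ 2048   /* stands in for q->buf_size */
    #define PG_RSVD  64     /* stands in for SGE_PG_RSVD */

    struct chunk {              /* models struct fl_pg_chunk */
        void *page;             /* backing "page" (one DMA mapping) */
        void *va;               /* this chunk's address within it */
        unsigned int offset;
        uint64_t *p_cnt;        /* shared count at the end of the page */
    };

    /* Carve the next chunk out of the current page, starting a fresh
     * page (and "mapping" it) when the previous one is used up. */
    static int get_chunk(struct chunk *pool, struct chunk *out)
    {
        if (!pool->page) {
            pool->page = malloc(PG_SIZE);   /* alloc_pages + pci_map_page */
            if (!pool->page)
                return -1;
            pool->va = pool->page;
            /* the counter lives in the reserved tail; the HW buffer size
             * is trimmed by PG_RSVD so the chip never DMAs over it */
            pool->p_cnt = (uint64_t *)((char *)pool->page + PG_SIZE - PG_RSVD);
            pool->offset = 0;
        }
        *out = *pool;
        pool->offset += CHUNK_SZ;
        if (pool->offset == PG_SIZE)
            pool->page = NULL;              /* page fully carved up */
        else
            pool->va = (char *)pool->va + CHUNK_SZ;
        *out->p_cnt = (out->offset == 0) ? 1 : *out->p_cnt + 1;
        return 0;
    }

    /* Drop one chunk; the last user "unmaps" and frees the page. */
    static void put_chunk(struct chunk *c)
    {
        if (--(*c->p_cnt) == 0)
            free(c->page);                  /* pci_unmap_page + put_page */
    }

    int main(void)
    {
        struct chunk pool = { 0 }, a, b;

        get_chunk(&pool, &a);
        get_chunk(&pool, &b);   /* same page: count is now 2 */
        printf("chunks sharing the page: %llu\n",
               (unsigned long long)*a.p_cnt);
        put_chunk(&a);
        put_chunk(&b);          /* count hits 0 here, page is freed */
        return 0;
    }

The driver additionally takes a get_page() reference per chunk; the model collapses that and the DMA reference count into one counter for brevity.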
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 9ee021e750c8..e508dc32f3ec 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -191,7 +191,8 @@ struct mdio_ops {
 };
 
 struct adapter_info {
-	unsigned char nports;                /* # of ports */
+	unsigned char nports0;               /* # of ports on channel 0 */
+	unsigned char nports1;               /* # of ports on channel 1 */
 	unsigned char phy_base_addr;         /* MDIO PHY base address */
 	unsigned int gpio_out;               /* GPIO output settings */
 	unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
@@ -422,6 +423,7 @@ struct adapter_params {
 	unsigned short b_wnd[NCCTRL_WIN];
 
 	unsigned int nports;              /* # of ethernet ports */
+	unsigned int chan_map;            /* bitmap of in-use Tx channels */
 	unsigned int stats_update_period; /* MAC stats accumulation period */
 	unsigned int linkpoll_period;     /* link poll period in 0.1s */
 	unsigned int rev;                 /* chip revision */
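chan_map is a two-bit map of which Tx channels have ports behind them: bit 0 for channel 0, bit 1 for channel 1. The derivation below mirrors the t3_prep_adapter() and mc7_prep() changes at the end of this diff; the wrapper program is only for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* e.g. a T302: one port on each of the two channels */
        unsigned char nports0 = 1, nports1 = 1;

        unsigned int chan_map = !!nports0 | (!!nports1 << 1);
        unsigned int nports   = nports0 + nports1;
        unsigned int nchan    = (chan_map == 3) ? 2 : 1;

        printf("chan_map=%u nports=%u nchan=%u\n", chan_map, nports, nchan);
        return 0;
    }

Splitting nports into nports0/nports1 matters because, as the chan_init_hw() change below shows, a single-channel adapter may hang off channel 0 or channel 1 — a distinction a bare port count cannot express.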
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8be89621bf7..2c2aaa741450 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -602,7 +602,6 @@ static int setup_sge_qsets(struct adapter *adap)
 					  &adap->params.sge.qset[qset_idx], ntxq, dev,
 					  netdev_get_tx_queue(dev, j));
 			if (err) {
-				t3_stop_sge_timers(adap);
 				t3_free_sge_resources(adap);
 				return err;
 			}
@@ -1046,6 +1045,8 @@ static int cxgb_up(struct adapter *adap)
 		setup_rss(adap);
 		if (!(adap->flags & NAPI_INIT))
 			init_napi(adap);
+
+		t3_start_sge_timers(adap);
 		adap->flags |= FULL_INIT_DONE;
 	}
 
@@ -2870,6 +2871,9 @@ static void t3_io_resume(struct pci_dev *pdev)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
 
+	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+		 t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
 	t3_resume_ports(adapter);
 }
 
@@ -3002,7 +3006,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 	static int version_printed;
 
 	int i, err, pci_using_dac = 0;
-	unsigned long mmio_start, mmio_len;
+	resource_size_t mmio_start, mmio_len;
 	const struct adapter_info *ai;
 	struct adapter *adapter = NULL;
 	struct port_info *pi;
@@ -3082,7 +3086,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
-	for (i = 0; i < ai->nports; ++i) {
+	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
 		struct net_device *netdev;
 
 		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
@@ -3172,7 +3176,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 out_free_dev:
 	iounmap(adapter->regs);
-	for (i = ai->nports - 1; i >= 0; --i)
+	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
 		if (adapter->port[i])
 			free_netdev(adapter->port[i]);
 
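Swapping unsigned long for resource_size_t in init_one() protects the MMIO range on 32-bit kernels, where a 64-bit BAR does not fit in unsigned long (resource_size_t widens with the platform's physical address size). A hypothetical illustration of the truncation this avoids, with fixed-width types standing in for the kernel's:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t bar_start = 0x2100000000ULL;   /* a BAR mapped above 4 GiB */

        uint32_t narrow = (uint32_t)bar_start;  /* 'unsigned long' on 32-bit */
        uint64_t wide   = bar_start;            /* resource_size_t keeps 64 bits */

        printf("truncated: %#x\n", narrow);     /* prints 0 - address lost */
        printf("preserved: %#llx\n", (unsigned long long)wide);
        return 0;
    }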
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a7555cb3fa4a..26d3587f3399 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -50,6 +50,7 @@
 #define SGE_RX_COPY_THRES  256
 #define SGE_RX_PULL_LEN    128
 
+#define SGE_PG_RSVD SMP_CACHE_BYTES
 /*
  * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
  * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
@@ -57,8 +58,10 @@
  */
 #define FL0_PG_CHUNK_SIZE  2048
 #define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
 #define FL1_PG_CHUNK_SIZE  (PAGE_SIZE > 8192 ? 16384 : 8192)
 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
 
 #define SGE_RX_DROP_THRES 16
 #define RX_RECLAIM_PERIOD (HZ/4)
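Worked numbers for the new size macros, taking PAGE_SIZE as 4096 and SMP_CACHE_BYTES as 64 (both illustrative; they are arch-dependent). alloc_size is the mapped allocation, the chunk size is what each Rx buffer occupies, and the queue-set setup later in this diff programs the hardware with buf_size - SGE_PG_RSVD so the reserved tail holding p_cnt is never DMA'd over:

    #include <stdio.h>

    #define PAGE_SIZE 4096              /* illustrative; the kernel provides this */
    #define SMP_CACHE_BYTES 64          /* illustrative; arch-dependent */
    #define SGE_PG_RSVD SMP_CACHE_BYTES

    #define FL0_PG_CHUNK_SIZE  2048
    #define FL0_PG_ORDER       0
    #define FL0_PG_ALLOC_SIZE  (PAGE_SIZE << FL0_PG_ORDER)
    #define FL1_PG_CHUNK_SIZE  (PAGE_SIZE > 8192 ? 16384 : 8192)
    #define FL1_PG_ORDER       (PAGE_SIZE > 8192 ? 0 : 1)
    #define FL1_PG_ALLOC_SIZE  (PAGE_SIZE << FL1_PG_ORDER)

    int main(void)
    {
        /* FL0: one 4096-byte mapping carved into two 2048-byte chunks;
         * the hardware is told 2048 - 64 = 1984 bytes per buffer. */
        printf("FL0: alloc=%d chunk=%d hw_buf=%d\n",
               FL0_PG_ALLOC_SIZE, FL0_PG_CHUNK_SIZE,
               FL0_PG_CHUNK_SIZE - SGE_PG_RSVD);

        /* FL1: one order-1 (8192-byte) mapping, a single 8192-byte chunk;
         * the hardware sees 8192 - 64 = 8128 bytes. */
        printf("FL1: alloc=%d chunk=%d hw_buf=%d\n",
               FL1_PG_ALLOC_SIZE, FL1_PG_CHUNK_SIZE,
               FL1_PG_CHUNK_SIZE - SGE_PG_RSVD);
        return 0;
    }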
@@ -345,13 +348,21 @@ static inline int should_restart_tx(const struct sge_txq *q)
 	return q->in_use - r < (q->size >> 1);
 }
 
-static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d)
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+			  struct rx_sw_desc *d)
 {
-	if (q->use_pages) {
-		if (d->pg_chunk.page)
-			put_page(d->pg_chunk.page);
+	if (q->use_pages && d->pg_chunk.page) {
+		(*d->pg_chunk.p_cnt)--;
+		if (!*d->pg_chunk.p_cnt)
+			pci_unmap_page(pdev,
+				       pci_unmap_addr(&d->pg_chunk, mapping),
+				       q->alloc_size, PCI_DMA_FROMDEVICE);
+
+		put_page(d->pg_chunk.page);
 		d->pg_chunk.page = NULL;
 	} else {
+		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+				 q->buf_size, PCI_DMA_FROMDEVICE);
 		kfree_skb(d->skb);
 		d->skb = NULL;
 	}
@@ -372,9 +383,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 	while (q->credits--) {
 		struct rx_sw_desc *d = &q->sdesc[cidx];
 
-		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
-				 q->buf_size, PCI_DMA_FROMDEVICE);
-		clear_rx_desc(q, d);
+
+		clear_rx_desc(pdev, q, d);
 		if (++cidx == q->size)
 			cidx = 0;
 	}
@@ -417,18 +427,39 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 	return 0;
 }
 
-static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+				   unsigned int gen)
+{
+	d->addr_lo = cpu_to_be32(mapping);
+	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+	wmb();
+	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+	return 0;
+}
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+			  struct rx_sw_desc *sd, gfp_t gfp,
 			  unsigned int order)
 {
 	if (!q->pg_chunk.page) {
+		dma_addr_t mapping;
+
 		q->pg_chunk.page = alloc_pages(gfp, order);
 		if (unlikely(!q->pg_chunk.page))
 			return -ENOMEM;
 		q->pg_chunk.va = page_address(q->pg_chunk.page);
+		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+				    SGE_PG_RSVD;
 		q->pg_chunk.offset = 0;
+		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
+				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
 	}
 	sd->pg_chunk = q->pg_chunk;
 
+	prefetch(sd->pg_chunk.p_cnt);
+
 	q->pg_chunk.offset += q->buf_size;
 	if (q->pg_chunk.offset == (PAGE_SIZE << order))
 		q->pg_chunk.page = NULL;
@@ -436,6 +467,12 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
 		q->pg_chunk.va += q->buf_size;
 		get_page(q->pg_chunk.page);
 	}
+
+	if (sd->pg_chunk.offset == 0)
+		*sd->pg_chunk.p_cnt = 1;
+	else
+		*sd->pg_chunk.p_cnt += 1;
+
 	return 0;
 }
 
@@ -460,35 +497,43 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
  */
 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
-	void *buf_start;
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	struct rx_desc *d = &q->desc[q->pidx];
 	unsigned int count = 0;
 
 	while (n--) {
+		dma_addr_t mapping;
 		int err;
 
 		if (q->use_pages) {
-			if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
+			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+						    q->order))) {
 nomem:				q->alloc_failed++;
 				break;
 			}
-			buf_start = sd->pg_chunk.va;
+			mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
+				  sd->pg_chunk.offset;
+			pci_unmap_addr_set(sd, dma_addr, mapping);
+
+			add_one_rx_chunk(mapping, d, q->gen);
+			pci_dma_sync_single_for_device(adap->pdev, mapping,
+						q->buf_size - SGE_PG_RSVD,
+						PCI_DMA_FROMDEVICE);
 		} else {
-			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+			void *buf_start;
 
+			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
 			if (!skb)
 				goto nomem;
 
 			sd->skb = skb;
 			buf_start = skb->data;
-		}
-
-		err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
-				     adap->pdev);
-		if (unlikely(err)) {
-			clear_rx_desc(q, sd);
-			break;
+			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+					     q->gen, adap->pdev);
+			if (unlikely(err)) {
+				clear_rx_desc(adap->pdev, q, sd);
+				break;
+			}
 		}
 
 		d++;
@@ -795,19 +840,19 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 	struct sk_buff *newskb, *skb;
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
-	newskb = skb = q->pg_skb;
+	dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
 
+	newskb = skb = q->pg_skb;
 	if (!skb && (len <= SGE_RX_COPY_THRES)) {
 		newskb = alloc_skb(len, GFP_ATOMIC);
 		if (likely(newskb != NULL)) {
 			__skb_put(newskb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr), len,
+			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
 					    PCI_DMA_FROMDEVICE);
 			memcpy(newskb->data, sd->pg_chunk.va, len);
-			pci_dma_sync_single_for_device(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr), len,
+			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
+						       len,
 					    PCI_DMA_FROMDEVICE);
 		} else if (!drop_thres)
 			return NULL;
 recycle:
@@ -820,16 +865,25 @@ recycle:
 	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
 		goto recycle;
 
+	prefetch(sd->pg_chunk.p_cnt);
+
 	if (!skb)
 		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
 	if (unlikely(!newskb)) {
 		if (!drop_thres)
 			return NULL;
 		goto recycle;
 	}
 
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+				    PCI_DMA_FROMDEVICE);
+	(*sd->pg_chunk.p_cnt)--;
+	if (!*sd->pg_chunk.p_cnt)
+		pci_unmap_page(adap->pdev,
+			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       fl->alloc_size,
+			       PCI_DMA_FROMDEVICE);
 	if (!skb) {
 		__skb_put(newskb, SGE_RX_PULL_LEN);
 		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@@ -1089,7 +1143,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	struct tx_desc *d = &q->desc[pidx];
 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
 
-	cpl->len = htonl(skb->len | 0x80000000);
+	cpl->len = htonl(skb->len);
 	cntrl = V_TXPKT_INTF(pi->port_id);
 
 	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
@@ -1958,8 +2012,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	skb_pull(skb, sizeof(*p) + pad);
 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
 	pi = netdev_priv(skb->dev);
-	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
-	    !p->fragment) {
+	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
+	    p->csum == htons(0xffff) && !p->fragment) {
 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else
@@ -2034,10 +2088,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	fl->credits--;
 
 	len -= offset;
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	pci_dma_sync_single_for_cpu(adap->pdev,
+				    pci_unmap_addr(sd, dma_addr),
+				    fl->buf_size - SGE_PG_RSVD,
+				    PCI_DMA_FROMDEVICE);
 
-	prefetch(&qs->lro_frag_tbl);
+	(*sd->pg_chunk.p_cnt)--;
+	if (!*sd->pg_chunk.p_cnt)
+		pci_unmap_page(adap->pdev,
+			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       fl->alloc_size,
+			       PCI_DMA_FROMDEVICE);
+
+	prefetch(qs->lro_va);
 
 	rx_frag += nr_frags;
 	rx_frag->page = sd->pg_chunk.page;
@@ -2047,6 +2110,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	qs->lro_frag_tbl.nr_frags++;
 	qs->lro_frag_tbl.len = frag_len;
 
+
 	if (!complete)
 		return;
 
@@ -2236,6 +2300,8 @@ no_mem:
 		if (fl->use_pages) {
 			void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
 
+			prefetch(&qs->lro_frag_tbl);
+
 			prefetch(addr);
 #if L1_CACHE_BYTES < 128
 			prefetch(addr + L1_CACHE_BYTES);
@@ -2972,21 +3038,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
 	q->fl[0].order = FL0_PG_ORDER;
 	q->fl[1].order = FL1_PG_ORDER;
+	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
 
 	spin_lock_irq(&adapter->sge.reg_lock);
 
 	/* FL threshold comparison uses < */
 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
 				   q->rspq.phys_addr, q->rspq.size,
-				   q->fl[0].buf_size, 1, 0);
+				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
 	if (ret)
 		goto err_unlock;
 
 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
 					  q->fl[i].phys_addr, q->fl[i].size,
-					  q->fl[i].buf_size, p->cong_thres, 1,
-					  0);
+					  q->fl[i].buf_size - SGE_PG_RSVD,
+					  p->cong_thres, 1, 0);
 		if (ret)
 			goto err_unlock;
 	}
@@ -3044,9 +3112,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
 		     V_NEWTIMER(q->rspq.holdoff_tmr));
 
-	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
-	mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
-
 	return 0;
 
 err_unlock:
@@ -3057,6 +3122,27 @@ err:
 }
 
 /**
+ *	t3_start_sge_timers - start SGE timer call backs
+ *	@adap: the adapter
+ *
+ *	Starts each SGE queue set's timer call back
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; ++i) {
+		struct sge_qset *q = &adap->sge.qs[i];
+
+		if (q->tx_reclaim_timer.function)
+			mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+		if (q->rx_reclaim_timer.function)
+			mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+	}
+}
+
+/**
  *	t3_stop_sge_timers - stop SGE timer call backs
  *	@adap: the adapter
  *
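t3_start_sge_timers() guards each mod_timer() with a check that the timer's callback was ever installed; a plausible reading is that qset slots beyond those actually brought up still hold zeroed timer_list structures, and arming one of those would be a bug. A toy model of the guard (the structures are stand-ins, not the kernel's):

    #include <stdio.h>

    struct timer_list { void (*function)(unsigned long); };
    struct sge_qset  { struct timer_list tx_reclaim_timer; };

    static void tx_reclaim(unsigned long data) { (void)data; }

    int main(void)
    {
        static struct sge_qset qs[4];   /* zeroed: no callbacks installed */
        int i;

        qs[0].tx_reclaim_timer.function = tx_reclaim;   /* only qset 0 set up */

        for (i = 0; i < 4; i++)
            if (qs[i].tx_reclaim_timer.function)        /* skip empty slots */
                printf("arming reclaim timer for qset %d\n", i);
        return 0;
    }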
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ff262a04ded0..31ed31a3428b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -493,20 +493,20 @@ int t3_phy_lasi_intr_handler(struct cphy *phy)
 }
 
 static const struct adapter_info t3_adap_info[] = {
-	{2, 0,
+	{1, 1, 0,
 	 F_GPIO2_OEN | F_GPIO4_OEN |
 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
 	 &mi1_mdio_ops, "Chelsio PE9000"},
-	{2, 0,
+	{1, 1, 0,
 	 F_GPIO2_OEN | F_GPIO4_OEN |
 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
 	 &mi1_mdio_ops, "Chelsio T302"},
-	{1, 0,
+	{1, 0, 0,
 	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
 	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 	 &mi1_mdio_ext_ops, "Chelsio T310"},
-	{2, 0,
+	{1, 1, 0,
 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
 	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
 	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
@@ -514,7 +514,7 @@ static const struct adapter_info t3_adap_info[] = {
 	 &mi1_mdio_ext_ops, "Chelsio T320"},
 	{},
 	{},
-	{1, 0,
+	{1, 0, 0,
 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
 	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
@@ -2128,16 +2128,40 @@ void t3_port_intr_clear(struct adapter *adapter, int idx)
 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
 				unsigned int type)
 {
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
-	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	if (type == F_RESPONSEQ) {
+		/*
+		 * Can't write the Response Queue Context bits for
+		 * Interrupt Armed or the Reserve bits after the chip
+		 * has been initialized out of reset.  Writing to these
+		 * bits can confuse the hardware.
+		 */
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	} else {
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	}
 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
 }
 
+/**
+ *	clear_sge_ctxt - completely clear an SGE context
+ *	@adapter: the adapter
+ *	@id: the context id
+ *	@type: the context type
+ *
+ *	Completely clear an SGE context.  Used predominantly at post-reset
+ *	initialization.  Note in particular that we don't skip writing to any
+ *	"sensitive bits" in the contexts the way that t3_sge_write_context()
+ *	does ...
+ */
 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
 			  unsigned int type)
 {
@@ -2145,7 +2169,14 @@ static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
-	return t3_sge_write_context(adap, id, type);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
 }
 
 /**
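The A_SG_CONTEXT_MASK0..3 registers act as per-bit write enables for the 128-bit context image: only bits set in the mask are taken from the data registers, the rest keep their current value. 0x17ffffff in MASK2 therefore leaves bits 27 and 29-31 of that word unwritten, which is how the interrupt-armed and reserved response-queue bits keep their live values. A small model of such a masked update (the function is made up for illustration; the real merge happens inside the chip):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t masked_write(uint32_t cur, uint32_t data, uint32_t mask)
    {
        /* bits outside the mask keep their live value */
        return (cur & ~mask) | (data & mask);
    }

    int main(void)
    {
        uint32_t live = 0xa8000001;   /* pretend bit 31 is "interrupt armed" */
        uint32_t data = 0x00000000;

        printf("mask ffffffff -> %08x\n", masked_write(live, data, 0xffffffff));
        printf("mask 17ffffff -> %08x\n", masked_write(live, data, 0x17ffffff));
        return 0;
    }

With the full mask the armed bit is clobbered to 0; with 0x17ffffff it survives the write.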
@@ -2729,10 +2760,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
-		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
+		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
-		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
+		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
 		     F_IPV6ENABLE | F_NICMODE);
@@ -3196,20 +3227,22 @@ int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
 }
 
 /*
- * Perform the bits of HW initialization that are dependent on the number
- * of available ports.
+ * Perform the bits of HW initialization that are dependent on the Tx
+ * channels being used.
  */
-static void init_hw_for_avail_ports(struct adapter *adap, int nports)
+static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
 {
 	int i;
 
-	if (nports == 1) {
+	if (chan_map != 3) {                                 /* one channel */
 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
-		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
-			     F_PORT0ACTIVE | F_ENFORCEPKT);
-		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
-	} else {
+		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
+			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
+					      F_TPTXPORT1EN | F_PORT1ACTIVE));
+		t3_write_reg(adap, A_PM1_TX_CFG,
+			     chan_map == 1 ? 0xffffffff : 0);
+	} else {                                             /* two channels */
 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
@@ -3517,7 +3550,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
-	init_hw_for_avail_ports(adapter, adapter->params.nports);
+	chan_init_hw(adapter, adapter->params.chan_map);
 	t3_sge_init(adapter, &adapter->params.sge);
 
 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@@ -3754,7 +3787,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 	get_pci_mode(adapter, &adapter->params.pci);
 
 	adapter->params.info = ai;
-	adapter->params.nports = ai->nports;
+	adapter->params.nports = ai->nports0 + ai->nports1;
+	adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
 	/*
 	 * We used to only run the "adapter check task" once a second if
@@ -3785,7 +3819,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 	mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
 	mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
 
-	p->nchan = ai->nports;
+	p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
 	p->pmrx_size = t3_mc7_size(&adapter->pmrx);
 	p->pmtx_size = t3_mc7_size(&adapter->pmtx);
 	p->cm_size = t3_mc7_size(&adapter->cm);