Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/adapter.h     |  11
-rw-r--r--  drivers/net/cxgb3/cxgb3_ioctl.h |  33
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c  |  69
-rw-r--r--  drivers/net/cxgb3/sge.c         | 390
-rw-r--r--  drivers/net/cxgb3/t3_hw.c       |   6
-rw-r--r--  drivers/net/cxgb3/version.h     |   2
6 files changed, 342 insertions(+), 169 deletions(-)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 5c97a64451ce..80c3d8f268a7 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -74,6 +74,11 @@ enum {	/* adapter flags */
 struct rx_desc;
 struct rx_sw_desc;
 
+struct sge_fl_page {
+	struct skb_frag_struct frag;
+	unsigned char *va;
+};
+
 struct sge_fl {			/* SGE per free-buffer list state */
 	unsigned int buf_size;	/* size of each Rx buffer */
 	unsigned int credits;	/* # of available Rx buffers */
@@ -81,11 +86,13 @@ struct sge_fl {			/* SGE per free-buffer list state */
 	unsigned int cidx;	/* consumer index */
 	unsigned int pidx;	/* producer index */
 	unsigned int gen;	/* free list generation */
+	unsigned int cntxt_id;	/* SGE context id for the free list */
+	struct sge_fl_page page;
 	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
 	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
 	dma_addr_t phys_addr;	/* physical address of HW ring start */
-	unsigned int cntxt_id;	/* SGE context id for the free list */
 	unsigned long empty;	/* # of times queue ran out of buffers */
+	unsigned long alloc_failed;	/* # of times buffer allocation failed */
 };
 
 /*
@@ -121,6 +128,8 @@ struct sge_rspq {		/* state for an SGE response queue */
 	unsigned long empty;	/* # of times queue ran out of credits */
 	unsigned long nomem;	/* # of responses deferred due to no mem */
 	unsigned long unhandled_irqs;	/* # of spurious intrs */
+	unsigned long starved;
+	unsigned long restarted;
 };
 
 struct tx_desc;
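
An aside on the free-list rework these header changes prepare for: refill_fl() in sge.c (further down in this patch) carves each allocated page into RX_PAGE_SIZE chunks and hands one chunk to each Rx descriptor, taking a page reference per chunk and letting the final chunk inherit the carver's own reference. A minimal userspace sketch of that reference-counting discipline follows; page_buf, carve_chunk and chunk_put are illustrative stand-ins, not driver symbols.

#include <assert.h>
#include <stdlib.h>

#define PAGE_SZ  4096		/* host page size stand-in */
#define CHUNK_SZ 2048		/* plays the role of RX_PAGE_SIZE */

struct page_buf {		/* stand-in for struct page + its refcount */
	unsigned char *mem;
	int refcnt;
};

struct chunk {			/* stand-in for struct sge_fl_page */
	struct page_buf *pg;
	unsigned int offset;
};

/*
 * Hand out the next CHUNK_SZ slice of the carver's current page,
 * allocating a fresh page when none is in progress.  Mirrors the
 * refill_fl() logic: the final slice inherits the carver's reference,
 * every earlier slice takes a new one (the get_page() call).
 */
static int carve_chunk(struct chunk *carver, struct chunk *out)
{
	if (!carver->pg) {
		carver->pg = malloc(sizeof(*carver->pg));
		if (!carver->pg)
			return -1;
		carver->pg->mem = malloc(PAGE_SZ);
		carver->pg->refcnt = 1;	/* the carver's own reference */
		carver->offset = 0;
	}
	out->pg = carver->pg;
	out->offset = carver->offset;

	carver->offset += CHUNK_SZ;
	assert(carver->offset <= PAGE_SZ);	/* the BUG_ON() in refill_fl */
	if (carver->offset == PAGE_SZ)
		carver->pg = NULL;	/* reference moves to this chunk */
	else
		carver->pg->refcnt++;	/* extra reference for the chunk */
	return 0;
}

static void chunk_put(struct chunk *c)	/* put_page() equivalent */
{
	if (--c->pg->refcnt == 0) {
		free(c->pg->mem);
		free(c->pg);
	}
}

int main(void)
{
	struct chunk carver = { 0, 0 }, a, b;

	carve_chunk(&carver, &a);
	carve_chunk(&carver, &b);	/* second half of the same page */
	assert(a.pg == b.pg && b.offset == CHUNK_SZ);
	chunk_put(&a);
	chunk_put(&b);			/* last reference: page freed */
	return 0;
}
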
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index a94281861a66..0a82fcddf2d8 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -36,28 +36,17 @@
  * Ioctl commands specific to this driver.
  */
 enum {
-	CHELSIO_SETREG = 1024,
-	CHELSIO_GETREG,
-	CHELSIO_SETTPI,
-	CHELSIO_GETTPI,
-	CHELSIO_GETMTUTAB,
-	CHELSIO_SETMTUTAB,
-	CHELSIO_GETMTU,
-	CHELSIO_SET_PM,
-	CHELSIO_GET_PM,
-	CHELSIO_GET_TCAM,
-	CHELSIO_SET_TCAM,
-	CHELSIO_GET_TCB,
-	CHELSIO_GET_MEM,
-	CHELSIO_LOAD_FW,
-	CHELSIO_GET_PROTO,
-	CHELSIO_SET_PROTO,
-	CHELSIO_SET_TRACE_FILTER,
-	CHELSIO_SET_QSET_PARAMS,
-	CHELSIO_GET_QSET_PARAMS,
-	CHELSIO_SET_QSET_NUM,
-	CHELSIO_GET_QSET_NUM,
-	CHELSIO_SET_PKTSCHED,
+	CHELSIO_GETMTUTAB = 1029,
+	CHELSIO_SETMTUTAB = 1030,
+	CHELSIO_SET_PM = 1032,
+	CHELSIO_GET_PM = 1033,
+	CHELSIO_GET_MEM = 1038,
+	CHELSIO_LOAD_FW = 1041,
+	CHELSIO_SET_TRACE_FILTER = 1044,
+	CHELSIO_SET_QSET_PARAMS = 1045,
+	CHELSIO_GET_QSET_PARAMS = 1046,
+	CHELSIO_SET_QSET_NUM = 1047,
+	CHELSIO_GET_QSET_NUM = 1048,
 };
 
 struct ch_reg {
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 43583ed655ab..7ff834e45d6b 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -434,27 +434,25 @@ static int setup_sge_qsets(struct adapter *adap)
 
 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
 			 char *buf,
-			 ssize_t(*format) (struct adapter *, char *))
+			 ssize_t(*format) (struct net_device *, char *))
 {
 	ssize_t len;
-	struct adapter *adap = to_net_dev(d)->priv;
 
 	/* Synchronize with ioctls that may shut down the device */
 	rtnl_lock();
-	len = (*format) (adap, buf);
+	len = (*format) (to_net_dev(d), buf);
 	rtnl_unlock();
 	return len;
 }
 
 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
 			  const char *buf, size_t len,
-			  ssize_t(*set) (struct adapter *, unsigned int),
+			  ssize_t(*set) (struct net_device *, unsigned int),
 			  unsigned int min_val, unsigned int max_val)
 {
 	char *endp;
 	ssize_t ret;
 	unsigned int val;
-	struct adapter *adap = to_net_dev(d)->priv;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -464,7 +462,7 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
 		return -EINVAL;
 
 	rtnl_lock();
-	ret = (*set) (adap, val);
+	ret = (*set) (to_net_dev(d), val);
 	if (!ret)
 		ret = len;
 	rtnl_unlock();
@@ -472,8 +470,9 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
 }
 
 #define CXGB3_SHOW(name, val_expr) \
-static ssize_t format_##name(struct adapter *adap, char *buf) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
 { \
+	struct adapter *adap = dev->priv; \
 	return sprintf(buf, "%u\n", val_expr); \
 } \
 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
@@ -482,8 +481,10 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
 	return attr_show(d, attr, buf, format_##name); \
 }
 
-static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
 {
+	struct adapter *adap = dev->priv;
+
 	if (adap->flags & FULL_INIT_DONE)
 		return -EBUSY;
 	if (val && adap->params.rev == 0)
@@ -500,8 +501,10 @@ static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
 	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
 }
 
-static ssize_t set_nservers(struct adapter *adap, unsigned int val)
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
 {
+	struct adapter *adap = dev->priv;
+
 	if (adap->flags & FULL_INIT_DONE)
 		return -EBUSY;
 	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
@@ -1549,32 +1552,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		return -EFAULT;
 
 	switch (cmd) {
-	case CHELSIO_SETREG:{
-		struct ch_reg edata;
-
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0
-		    || edata.addr >= adapter->mmio_len)
-			return -EINVAL;
-		writel(edata.val, adapter->regs + edata.addr);
-		break;
-	}
-	case CHELSIO_GETREG:{
-		struct ch_reg edata;
-
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0
-		    || edata.addr >= adapter->mmio_len)
-			return -EINVAL;
-		edata.val = readl(adapter->regs + edata.addr);
-		if (copy_to_user(useraddr, &edata, sizeof(edata)))
-			return -EFAULT;
-		break;
-	}
 	case CHELSIO_SET_QSET_PARAMS:{
 		int i;
 		struct qset_params *q;
@@ -1838,10 +1815,10 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EINVAL;
 
 		/*
-		 * Version scheme:
-		 * bits 0..9: chip version
-		 * bits 10..15: chip revision
+		 * Version scheme:
+		 * bits 0..9: chip version
+		 * bits 10..15: chip revision
 		 */
 		t.version = 3 | (adapter->params.rev << 10);
 		if (copy_to_user(useraddr, &t, sizeof(t)))
 			return -EFAULT;
@@ -1890,20 +1867,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 				   t.trace_rx);
 		break;
 	}
-	case CHELSIO_SET_PKTSCHED:{
-		struct ch_pktsched_params p;
-
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (!adapter->open_device_map)
-			return -EAGAIN;	/* uP and SGE must be running */
-		if (copy_from_user(&p, useraddr, sizeof(p)))
-			return -EFAULT;
-		send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
-				  p.binding);
-		break;
-
-	}
 	default:
 		return -EOPNOTSUPP;
 	}
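
An aside on the sysfs rework above: attr_show()/attr_store() now receive the net_device and each callback recovers the adapter itself via dev->priv, keeping the generic helpers adapter-agnostic. A reduced plain-C sketch of that dispatch shape; device, attr_store and set_nfilters here are simplified stand-ins for the kernel types, not the driver code itself.

#include <stdio.h>
#include <stdlib.h>

struct device { void *priv; };		/* stand-in for net_device */

/* Generic store helper: parse and range-check once, then dispatch. */
static long attr_store(struct device *d, const char *buf,
		       long (*set)(struct device *, unsigned int),
		       unsigned int min_val, unsigned int max_val)
{
	char *endp;
	unsigned long val = strtoul(buf, &endp, 0);

	if (endp == buf || val < min_val || val > max_val)
		return -1;		/* -EINVAL in the driver */
	return set(d, (unsigned int)val);
}

static long set_nfilters(struct device *d, unsigned int val)
{
	/* the real callback derives the adapter via dev->priv here */
	printf("nfilters <- %u (adapter at %p)\n", val, d->priv);
	return 0;
}

int main(void)
{
	struct device dev = { &dev };	/* dummy priv pointer */

	return attr_store(&dev, "64", set_nfilters, 0, ~0u) ? 1 : 0;
}
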
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f2cf8a07c61..c23783432e51 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -45,9 +45,25 @@
 #define USE_GTS 0
 
 #define SGE_RX_SM_BUF_SIZE 1536
+
+/*
+ * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
+ * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
+ * be a multiple of the host page size).
+ */
+#define USE_RX_PAGE
+#define RX_PAGE_SIZE 2048
+
+/*
+ * skb freelist packets are copied into a new skb (and the freelist one is
+ * reused) if their len is <=
+ */
 #define SGE_RX_COPY_THRES 256
 
-# define SGE_RX_DROP_THRES 16
+/*
+ * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ */
+#define SGE_RX_DROP_THRES 16
 
 /*
  * Period of the Tx buffer reclaim timer.  This timer does not need to run
@@ -85,7 +101,10 @@ struct tx_sw_desc {	/* SW state per Tx descriptor */
 };
 
 struct rx_sw_desc {	/* SW state per Rx descriptor */
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		struct sge_fl_page page;
+	} t;
 	DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };
 
@@ -105,6 +124,15 @@ struct unmap_info {	/* packet unmapping info, overlays skb->cb */
 };
 
 /*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+	struct pci_dev *pdev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/*
  * Maps a number of flits to the number of Tx descriptors that can hold them.
  * The formula is
  *
@@ -252,10 +280,13 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned int cidx = q->cidx;
 
+	const int need_unmap = need_skb_unmap() &&
+			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
 	d = &q->sdesc[cidx];
 	while (n--) {
 		if (d->skb) {	/* an SGL is present */
-			if (need_skb_unmap())
+			if (need_unmap)
 				unmap_skb(d->skb, q, cidx, pdev);
 			if (d->skb->priority == cidx)
 				kfree_skb(d->skb);
@@ -320,16 +351,27 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 
 		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
 				 q->buf_size, PCI_DMA_FROMDEVICE);
-		kfree_skb(d->skb);
-		d->skb = NULL;
+
+		if (q->buf_size != RX_PAGE_SIZE) {
+			kfree_skb(d->t.skb);
+			d->t.skb = NULL;
+		} else {
+			if (d->t.page.frag.page)
+				put_page(d->t.page.frag.page);
+			d->t.page.frag.page = NULL;
+		}
 		if (++cidx == q->size)
 			cidx = 0;
 	}
+
+	if (q->page.frag.page)
+		put_page(q->page.frag.page);
+	q->page.frag.page = NULL;
 }
 
 /**
  *	add_one_rx_buf - add a packet buffer to a free-buffer list
- *	@skb: the buffer to add
+ *	@va: va of the buffer to add
  *	@len: the buffer length
  *	@d: the HW Rx descriptor to write
  *	@sd: the SW Rx descriptor to write
@@ -339,14 +381,13 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  *	Add a buffer of the given length to the supplied HW and SW Rx
  *	descriptors.
  */
-static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
+static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
 				  struct rx_desc *d, struct rx_sw_desc *sd,
 				  unsigned int gen, struct pci_dev *pdev)
 {
 	dma_addr_t mapping;
 
-	sd->skb = skb;
-	mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
 	pci_unmap_addr_set(sd, dma_addr, mapping);
 
 	d->addr_lo = cpu_to_be32(mapping);
@@ -371,14 +412,47 @@ static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	struct rx_desc *d = &q->desc[q->pidx];
+	struct sge_fl_page *p = &q->page;
 
 	while (n--) {
-		struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+		unsigned char *va;
 
-		if (!skb)
-			break;
+		if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
+			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+
+			if (!skb) {
+				q->alloc_failed++;
+				break;
+			}
+			va = skb->data;
+			sd->t.skb = skb;
+		} else {
+			if (!p->frag.page) {
+				p->frag.page = alloc_pages(gfp, 0);
+				if (unlikely(!p->frag.page)) {
+					q->alloc_failed++;
+					break;
+				} else {
+					p->frag.size = RX_PAGE_SIZE;
+					p->frag.page_offset = 0;
+					p->va = page_address(p->frag.page);
+				}
+			}
+
+			memcpy(&sd->t, p, sizeof(*p));
+			va = p->va;
+
+			p->frag.page_offset += RX_PAGE_SIZE;
+			BUG_ON(p->frag.page_offset > PAGE_SIZE);
+			p->va += RX_PAGE_SIZE;
+			if (p->frag.page_offset == PAGE_SIZE)
+				p->frag.page = NULL;
+			else
+				get_page(p->frag.page);
+		}
+
+		add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
 
-		add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
 		d++;
 		sd++;
 		if (++q->pidx == q->size) {
@@ -413,7 +487,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
 	struct rx_desc *from = &q->desc[idx];
 	struct rx_desc *to = &q->desc[q->pidx];
 
-	q->sdesc[q->pidx] = q->sdesc[idx];
+	memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
 	to->addr_lo = from->addr_lo;	/* already big endian */
 	to->addr_hi = from->addr_hi;	/* likewise */
 	wmb();
@@ -446,7 +520,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
  * of the SW ring.
  */
 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
-			size_t sw_size, dma_addr_t *phys, void *metadata)
+			size_t sw_size, dma_addr_t * phys, void *metadata)
 {
 	size_t len = nelem * elem_size;
 	void *s = NULL;
@@ -576,61 +650,6 @@ static inline unsigned int flits_to_desc(unsigned int n)
 }
 
 /**
- *	get_packet - return the next ingress packet buffer from a free list
- *	@adap: the adapter that received the packet
- *	@fl: the SGE free list holding the packet
- *	@len: the packet length including any SGE padding
- *	@drop_thres: # of remaining buffers before we start dropping packets
- *
- *	Get the next packet from a free list and complete setup of the
- *	sk_buff.  If the packet is small we make a copy and recycle the
- *	original buffer, otherwise we use the original buffer itself.  If a
- *	positive drop threshold is supplied packets are dropped and their
- *	buffers recycled if (a) the number of remaining buffers is under the
- *	threshold and the packet is too big to copy, or (b) the packet should
- *	be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
-				  unsigned int len, unsigned int drop_thres)
-{
-	struct sk_buff *skb = NULL;
-	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
-	prefetch(sd->skb->data);
-
-	if (len <= SGE_RX_COPY_THRES) {
-		skb = alloc_skb(len, GFP_ATOMIC);
-		if (likely(skb != NULL)) {
-			__skb_put(skb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev,
-						    pci_unmap_addr(sd,
-								   dma_addr),
-						    len, PCI_DMA_FROMDEVICE);
-			memcpy(skb->data, sd->skb->data, len);
-			pci_dma_sync_single_for_device(adap->pdev,
-						       pci_unmap_addr(sd,
-								      dma_addr),
-						       len, PCI_DMA_FROMDEVICE);
-		} else if (!drop_thres)
-			goto use_orig_buf;
-recycle:
-		recycle_rx_buf(adap, fl, fl->cidx);
-		return skb;
-	}
-
-	if (unlikely(fl->credits < drop_thres))
-		goto recycle;
-
-use_orig_buf:
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
-	skb = sd->skb;
-	skb_put(skb, len);
-	__refill_fl(adap, fl);
-	return skb;
-}
-
-/**
  *	get_imm_packet - return the next ingress packet buffer from a response
  *	@resp: the response descriptor containing the packet data
  *
@@ -1227,6 +1246,50 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
 }
 
 /**
+ *	deferred_unmap_destructor - unmap a packet when it is freed
+ *	@skb: the packet
+ *
+ *	This is the packet destructor used for Tx packets that need to remain
+ *	mapped until they are freed rather than until their Tx descriptors are
+ *	freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+	int i;
+	const dma_addr_t *p;
+	const struct skb_shared_info *si;
+	const struct deferred_unmap_info *dui;
+	const struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	p = dui->addr;
+
+	if (ui->len)
+		pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+
+	si = skb_shinfo(skb);
+	for (i = 0; i < si->nr_frags; i++)
+		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+			       PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+				     const struct sg_ent *sgl, int sgl_flits)
+{
+	dma_addr_t *p;
+	struct deferred_unmap_info *dui;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	dui->pdev = pdev;
+	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+		*p++ = be64_to_cpu(sgl->addr[0]);
+		*p++ = be64_to_cpu(sgl->addr[1]);
+	}
+	if (sgl_flits)
+		*p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
  *	write_ofld_wr - write an offload work request
  *	@adap: the adapter
  *	@skb: the packet to send
@@ -1262,8 +1325,11 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
 	sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
 			     adap->pdev);
-	if (need_skb_unmap())
+	if (need_skb_unmap()) {
+		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+		skb->destructor = deferred_unmap_destructor;
 		((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+	}
 
 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
 			 gen, from->wr_hi, from->wr_lo);
@@ -1617,7 +1683,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
 	struct port_info *pi;
 
-	rq->eth_pkts++;
 	skb_pull(skb, sizeof(*p) + pad);
 	skb->dev = adap->port[p->iff];
 	skb->dev->last_rx = jiffies;
@@ -1645,6 +1710,85 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	netif_rx(skb);
 }
 
+#define SKB_DATA_SIZE 128
+
+static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
+			  unsigned int len)
+{
+	skb->len = len;
+	if (len <= SKB_DATA_SIZE) {
+		memcpy(skb->data, p->va, len);
+		skb->tail += len;
+		put_page(p->frag.page);
+	} else {
+		memcpy(skb->data, p->va, SKB_DATA_SIZE);
+		skb_shinfo(skb)->frags[0].page = p->frag.page;
+		skb_shinfo(skb)->frags[0].page_offset =
+		    p->frag.page_offset + SKB_DATA_SIZE;
+		skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
+		skb_shinfo(skb)->nr_frags = 1;
+		skb->data_len = len - SKB_DATA_SIZE;
+		skb->tail += SKB_DATA_SIZE;
+		skb->truesize += skb->data_len;
+	}
+}
+
+/**
+ *	get_packet - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+				  unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	prefetch(sd->t.skb->data);
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			struct rx_desc *d = &fl->desc[fl->cidx];
+			dma_addr_t mapping =
+			    (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
+					 be32_to_cpu(d->addr_lo));
+
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
+						    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->t.skb->data, len);
+			pci_dma_sync_single_for_device(adap->pdev, mapping, len,
+						       PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+recycle:
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits < drop_thres))
+		goto recycle;
+
+use_orig_buf:
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	skb = sd->t.skb;
+	skb_put(skb, len);
+	__refill_fl(adap, fl);
+	return skb;
+}
+
 /**
  *	handle_rsp_cntrl_info - handles control information in a response
  *	@qs: the queue set corresponding to the response
@@ -1767,7 +1911,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 	q->next_holdoff = q->holdoff_tmr;
 
 	while (likely(budget_left && is_new_response(r, q))) {
-		int eth, ethpad = 0;
+		int eth, ethpad = 2;
 		struct sk_buff *skb = NULL;
 		u32 len, flags = ntohl(r->flags);
 		u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
@@ -1794,18 +1938,56 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 				break;
 			}
 			q->imm_data++;
+			ethpad = 0;
 		} else if ((len = ntohl(r->len_cq)) != 0) {
-			struct sge_fl *fl;
+			struct sge_fl *fl =
+			    (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+
+			if (fl->buf_size == RX_PAGE_SIZE) {
+				struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+				struct sge_fl_page *p = &sd->t.page;
+
+				prefetch(p->va);
+				prefetch(p->va + L1_CACHE_BYTES);
+
+				__refill_fl(adap, fl);
+
+				pci_unmap_single(adap->pdev,
+						 pci_unmap_addr(sd, dma_addr),
+						 fl->buf_size,
+						 PCI_DMA_FROMDEVICE);
+
+				if (eth) {
+					if (unlikely(fl->credits <
+						     SGE_RX_DROP_THRES))
+						goto eth_recycle;
+
+					skb = alloc_skb(SKB_DATA_SIZE,
+							GFP_ATOMIC);
+					if (unlikely(!skb)) {
+eth_recycle:
+						q->rx_drops++;
+						recycle_rx_buf(adap, fl,
+							       fl->cidx);
+						goto eth_done;
+					}
+				} else {
+					skb = alloc_skb(SKB_DATA_SIZE,
+							GFP_ATOMIC);
+					if (unlikely(!skb))
+						goto no_mem;
+				}
+
+				skb_data_init(skb, p, G_RSPD_LEN(len));
+eth_done:
+				fl->credits--;
+				q->eth_pkts++;
+			} else {
+				fl->credits--;
+				skb = get_packet(adap, fl, G_RSPD_LEN(len),
+						 eth ? SGE_RX_DROP_THRES : 0);
+			}
 
-			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
-			fl->credits--;
-			skb = get_packet(adap, fl, G_RSPD_LEN(len),
-					 eth ? SGE_RX_DROP_THRES : 0);
-			if (!skb)
-				q->rx_drops++;
-			else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
-				__skb_pull(skb, 2);
-			ethpad = 2;
 			if (++fl->cidx == fl->size)
 				fl->cidx = 0;
 		} else
@@ -1829,18 +2011,23 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 			q->credits = 0;
 		}
 
-		if (likely(skb != NULL)) {
+		if (skb) {
+			/* Preserve the RSS info in csum & priority */
+			skb->csum = rss_hi;
+			skb->priority = rss_lo;
+
 			if (eth)
 				rx_eth(adap, q, skb, ethpad);
 			else {
-				/* Preserve the RSS info in csum & priority */
-				skb->csum = rss_hi;
-				skb->priority = rss_lo;
-				ngathered = rx_offload(&adap->tdev, q, skb,
-						       offload_skbs, ngathered);
+				if (unlikely(r->rss_hdr.opcode ==
+					     CPL_TRACE_PKT))
+					__skb_pull(skb, ethpad);
+
+				ngathered = rx_offload(&adap->tdev, q,
+						       skb, offload_skbs,
+						       ngathered);
 			}
 		}
-
 		--budget_left;
 	}
 
@@ -2320,10 +2507,23 @@ static void sge_timer_cb(unsigned long data)
 		    &adap->sge.qs[0].rspq.lock;
 	if (spin_trylock_irq(lock)) {
 		if (!napi_is_scheduled(qs->netdev)) {
+			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
 			if (qs->fl[0].credits < qs->fl[0].size)
 				__refill_fl(adap, &qs->fl[0]);
 			if (qs->fl[1].credits < qs->fl[1].size)
 				__refill_fl(adap, &qs->fl[1]);
+
+			if (status & (1 << qs->rspq.cntxt_id)) {
+				qs->rspq.starved++;
+				if (qs->rspq.credits) {
+					refill_rspq(adap, &qs->rspq, 1);
+					qs->rspq.credits--;
+					qs->rspq.restarted++;
+					t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+						     1 << qs->rspq.cntxt_id);
+				}
+			}
 		}
 		spin_unlock_irq(lock);
 	}
@@ -2432,13 +2632,21 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 		flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
 
 	if (ntxq == 1) {
+#ifdef USE_RX_PAGE
+		q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
 		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
 		    sizeof(struct cpl_rx_pkt);
+#endif
 		q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
 		    sizeof(struct cpl_rx_pkt);
 	} else {
+#ifdef USE_RX_PAGE
+		q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
 		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
 		    sizeof(struct cpl_rx_data);
+#endif
 		q->fl[1].buf_size = (16 * 1024) -
 		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	}
@@ -2632,7 +2840,7 @@ void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
 		q->polling = adap->params.rev > 0;
 		q->coalesce_usecs = 5;
 		q->rspq_size = 1024;
-		q->fl_size = 4096;
+		q->fl_size = 1024;
 		q->jumbo_size = 512;
 		q->txq_size[TXQ_ETH] = 1024;
 		q->txq_size[TXQ_OFLD] = 1024;
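
An aside on skb_data_init() in the sge.c changes above: each page-based packet is split into a copied linear header of at most SKB_DATA_SIZE (128) bytes plus, for larger frames, a single page fragment holding the remainder; small packets release their page reference immediately. A tiny standalone model of that split arithmetic, illustrative only:

#include <stdio.h>

#define SKB_DATA_SIZE 128	/* linear (copied) portion, as in the patch */

/* Model the skb_data_init() split for a packet of 'len' bytes. */
static void model_split(unsigned int len)
{
	if (len <= SKB_DATA_SIZE)
		printf("len %4u: all %u bytes copied, page ref dropped\n",
		       len, len);
	else
		printf("len %4u: %u bytes copied, %u bytes left in the "
		       "page fragment\n", len, SKB_DATA_SIZE,
		       len - SKB_DATA_SIZE);
}

int main(void)
{
	model_split(60);	/* small frame: memcpy only, e.g. an ACK */
	model_split(1514);	/* full Ethernet frame: pull-up + fragment */
	return 0;
}
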
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 365a7f5b1f94..eaa7a2e89a30 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -884,11 +884,13 @@ int t3_check_fw_version(struct adapter *adapter)
 	major = G_FW_VERSION_MAJOR(vers);
 	minor = G_FW_VERSION_MINOR(vers);
 
-	if (type == FW_VERSION_T3 && major == 3 && minor == 1)
+	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+	    minor == FW_VERSION_MINOR)
 		return 0;
 
 	CH_ERR(adapter, "found wrong FW version(%u.%u), "
-	       "driver needs version 3.1\n", major, minor);
+	       "driver needs version %u.%u\n", major, minor,
+	       FW_VERSION_MAJOR, FW_VERSION_MINOR);
 	return -EINVAL;
 }
 
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 2b67dd523cc1..782a6cf158a5 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -36,4 +36,6 @@
 #define DRV_NAME "cxgb3"
 /* Driver version */
 #define DRV_VERSION "1.0"
+#define FW_VERSION_MAJOR 3
+#define FW_VERSION_MINOR 2
 #endif				/* __CHELSIO_VERSION_H */
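
A closing aside tying the last two files together: t3_check_fw_version() now compares against the FW_VERSION_MAJOR/FW_VERSION_MINOR macros added in version.h, and the GET_QSET_PARAMS path in cxgb3_main.c packs its version word as bits 0..9 chip version and bits 10..15 chip revision. A quick check of that packing; the VERS_CHIP/VERS_REV macro names are illustrative, not driver symbols:

#include <assert.h>

#define VERS_CHIP(v)	((v) & 0x3ff)		/* bits 0..9  */
#define VERS_REV(v)	(((v) >> 10) & 0x3f)	/* bits 10..15 */

int main(void)
{
	/* as in cxgb3_main.c: t.version = 3 | (adapter->params.rev << 10) */
	unsigned int rev = 2;			/* hypothetical chip rev */
	unsigned int version = 3 | (rev << 10);

	assert(VERS_CHIP(version) == 3);
	assert(VERS_REV(version) == rev);
	return 0;
}
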