author     Divy Le Ray <divy@chelsio.com>           2008-10-08 20:37:33 -0400
committer  David S. Miller <davem@davemloft.net>    2008-10-08 20:37:33 -0400
commit     8c26376112fb4b8dfea42069b602c03d53366052 (patch)
tree       4dacc0445b7603263f0e5afa4ce91430a83f8418 /drivers/net/cxgb3
parent     20d3fc11505a2706a33b4c9a932af036d836727f (diff)
cxgb3: Allocate multiqueues at init time
Allocate one queue set per CPU core, up to the maximum number of available qsets.
Share the queue sets between the ports on multi-port adapters.
Rename the MSI-X interrupt vectors ethX-N, where N is the queue set number.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
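
As a rough illustration of the policy described above — not the driver's own code — the sketch below mirrors the logic of the set_nqsets() helper added by this patch in plain userspace C. SGE_QSETS, the pick_nqsets() name, and the example inputs (rev, port count, CPU count) are assumptions chosen for the example; in the driver they come from the adapter parameters and num_online_cpus().

```c
/*
 * Illustrative sketch of the qset distribution policy added by this patch
 * (modeled on set_nqsets() in cxgb3_main.c, stripped of driver types).
 */
#include <stdio.h>

#define SGE_QSETS 8	/* assumed total number of SGE queue sets */

static int pick_nqsets(int rev, int hwports, int num_cpus)
{
	int nqsets = SGE_QSETS;

	if (rev > 0) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;	/* split the pool between both ports */
		if (nqsets > num_cpus)
			nqsets = num_cpus;	/* at most one qset per core */
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else {
		nqsets = 1;			/* rev 0 parts stay single-queue */
	}
	return nqsets;
}

int main(void)
{
	int rev = 1, hwports = 2, num_cpus = 4;	/* example: dual-port adapter, 4 cores */
	int nqsets = pick_nqsets(rev, hwports, num_cpus);
	int i, first = 0;

	for (i = 0; i < hwports; i++) {
		printf("Port %d: first_qset=%d nqsets=%d\n", i, first, nqsets);
		first += nqsets;	/* ports take consecutive slices of the qset array */
	}
	return 0;
}
```

With these example inputs the sketch prints first_qset=0/nqsets=4 for port 0 and first_qset=4/nqsets=4 for port 1, i.e. the two ports share the adapter's qsets back to back, which is the behavior the commit message describes.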
Diffstat (limited to 'drivers/net/cxgb3')
 -rw-r--r--  drivers/net/cxgb3/common.h       |   1
 -rw-r--r--  drivers/net/cxgb3/cxgb3_ioctl.h  |   2
 -rw-r--r--  drivers/net/cxgb3/cxgb3_main.c   | 179
3 files changed, 145 insertions, 37 deletions
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index d6dbcd403a7d..ace6b58bae8e 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -358,6 +358,7 @@ struct qset_params {		/* SGE queue set parameters */
 	unsigned int jumbo_size;	/* # of entries in jumbo free list */
 	unsigned int txq_size[SGE_TXQ_PER_SET];	/* Tx queue sizes */
 	unsigned int cong_thres;	/* FL congestion threshold */
+	unsigned int vector;		/* Interrupt (line or vector) number */
 };
 
 struct sge_params {
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index 68200a14065e..3e8d5faec3a4 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -92,6 +92,8 @@ struct ch_qset_params {
 	int32_t polling;
 	int32_t lro;
 	int32_t cong_thres;
+	int32_t vector;
+	int32_t qnum;
 };
 
 struct ch_pktsched_params {
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 0e51d49842fa..fea2c719c8f7 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -274,10 +274,10 @@ static void name_msix_vecs(struct adapter *adap)
 
 		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
 			snprintf(adap->msix_info[msi_idx].desc, n,
-				 "%s (queue %d)", d->name, i);
+				 "%s-%d", d->name, pi->first_qset + i);
 			adap->msix_info[msi_idx].desc[n] = 0;
 		}
 	}
 }
 
 static int request_msix_data_irqs(struct adapter *adap)
@@ -306,6 +306,22 @@ static int request_msix_data_irqs(struct adapter *adap)
 	return 0;
 }
 
+static void free_irq_resources(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		int i, n = 0;
+
+		free_irq(adapter->msix_info[0].vec, adapter);
+		for_each_port(adapter, i)
+			n += adap2pinfo(adapter, i)->nqsets;
+
+		for (i = 0; i < n; ++i)
+			free_irq(adapter->msix_info[i + 1].vec,
+				 &adapter->sge.qs[i]);
+	} else
+		free_irq(adapter->pdev->irq, adapter);
+}
+
 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 			      unsigned long n)
 {
@@ -473,7 +489,10 @@ static int setup_sge_qsets(struct adapter *adap)
 		struct port_info *pi = netdev_priv(dev);
 
 		pi->qs = &adap->sge.qs[pi->first_qset];
-		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
+		     ++j, ++qset_idx) {
+			if (!pi->rx_csum_offload)
+				adap->params.sge.qset[qset_idx].lro = 0;
 			err = t3_sge_alloc_qset(adap, qset_idx, 1,
 				(adap->flags & USING_MSIX) ? qset_idx + 1 :
 				irq_idx,
@@ -740,11 +759,12 @@ static void init_port_mtus(struct adapter *adapter)
 	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 }
 
-static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
+static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 			      int hi, int port)
 {
 	struct sk_buff *skb;
 	struct mngt_pktsched_wr *req;
+	int ret;
 
 	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
 	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
@@ -755,20 +775,28 @@ static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 	req->min = lo;
 	req->max = hi;
 	req->binding = port;
-	t3_mgmt_tx(adap, skb);
+	ret = t3_mgmt_tx(adap, skb);
+
+	return ret;
 }
 
-static void bind_qsets(struct adapter *adap)
+static int bind_qsets(struct adapter *adap)
 {
-	int i, j;
+	int i, j, err = 0;
 
 	for_each_port(adap, i) {
 		const struct port_info *pi = adap2pinfo(adap, i);
 
-		for (j = 0; j < pi->nqsets; ++j)
-			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
-					  -1, i);
+		for (j = 0; j < pi->nqsets; ++j) {
+			int ret = send_pktsched_cmd(adap, 1,
+						    pi->first_qset + j, -1,
+						    -1, i);
+			if (ret)
+				err = ret;
+		}
 	}
+
+	return err;
 }
 
 #define FW_FNAME "t3fw-%d.%d.%d.bin"
@@ -954,9 +982,16 @@ static int cxgb_up(struct adapter *adap)
 		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
 	}
 
-	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
-		bind_qsets(adap);
-	adap->flags |= QUEUES_BOUND;
+	if (!(adap->flags & QUEUES_BOUND)) {
+		err = bind_qsets(adap);
+		if (err) {
+			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+			t3_intr_disable(adap);
+			free_irq_resources(adap);
+			goto out;
+		}
+		adap->flags |= QUEUES_BOUND;
+	}
 
 out:
 	return err;
@@ -975,19 +1010,7 @@ static void cxgb_down(struct adapter *adapter)
 	t3_intr_disable(adapter);
 	spin_unlock_irq(&adapter->work_lock);
 
-	if (adapter->flags & USING_MSIX) {
-		int i, n = 0;
-
-		free_irq(adapter->msix_info[0].vec, adapter);
-		for_each_port(adapter, i)
-			n += adap2pinfo(adapter, i)->nqsets;
-
-		for (i = 0; i < n; ++i)
-			free_irq(adapter->msix_info[i + 1].vec,
-				 &adapter->sge.qs[i]);
-	} else
-		free_irq(adapter->pdev->irq, adapter);
-
+	free_irq_resources(adapter);
 	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
 	quiesce_rx(adapter);
 }
@@ -1292,8 +1315,8 @@ static unsigned long collect_sge_port_stats(struct adapter *adapter,
 	int i;
 	unsigned long tot = 0;
 
-	for (i = 0; i < p->nqsets; ++i)
-		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
+	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
+		tot += adapter->sge.qs[i].port_stats[idx];
 	return tot;
 }
 
@@ -1497,7 +1520,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct link_config *lc = &p->link_config;
 
 	if (!(lc->supported & SUPPORTED_Autoneg))
-		return -EOPNOTSUPP;	/* can't change speed/duplex */
+		return -EOPNOTSUPP;	/* can't change speed/duplex */
 
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
@@ -1576,8 +1599,10 @@ static int set_rx_csum(struct net_device *dev, u32 data)
 		struct adapter *adap = p->adapter;
 		int i;
 
-		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
+		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+			adap->params.sge.qset[i].lro = 0;
 			adap->sge.qs[i].lro_enabled = 0;
+		}
 	}
 	return 0;
 }
@@ -1783,6 +1808,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		int i;
 		struct qset_params *q;
 		struct ch_qset_params t;
+		int q1 = pi->first_qset;
+		int nqsets = pi->nqsets;
 
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
@@ -1805,6 +1832,16 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
 				MAX_RSPQ_ENTRIES))
 			return -EINVAL;
+
+		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				if (t.qset_idx >= pi->first_qset &&
+				    t.qset_idx < pi->first_qset + pi->nqsets &&
+				    !pi->rx_csum_offload)
+					return -EINVAL;
+			}
+
 		if ((adapter->flags & FULL_INIT_DONE) &&
 		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
 		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
@@ -1812,6 +1849,20 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		     t.polling >= 0 || t.cong_thres >= 0))
 			return -EBUSY;
 
+		/* Allow setting of any available qset when offload enabled */
+		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+			q1 = 0;
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				nqsets += pi->first_qset + pi->nqsets;
+			}
+		}
+
+		if (t.qset_idx < q1)
+			return -EINVAL;
+		if (t.qset_idx > q1 + nqsets - 1)
+			return -EINVAL;
+
 		q = &adapter->params.sge.qset[t.qset_idx];
 
 		if (t.rspq_size >= 0)
@@ -1861,13 +1912,26 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 	case CHELSIO_GET_QSET_PARAMS:{
 		struct qset_params *q;
 		struct ch_qset_params t;
+		int q1 = pi->first_qset;
+		int nqsets = pi->nqsets;
+		int i;
 
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
-		if (t.qset_idx >= SGE_QSETS)
+
+		/* Display qsets for all ports when offload enabled */
+		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+			q1 = 0;
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				nqsets = pi->first_qset + pi->nqsets;
+			}
+		}
+
+		if (t.qset_idx >= nqsets)
 			return -EINVAL;
 
-		q = &adapter->params.sge.qset[t.qset_idx];
+		q = &adapter->params.sge.qset[q1 + t.qset_idx];
 		t.rspq_size = q->rspq_size;
 		t.txq_size[0] = q->txq_size[0];
 		t.txq_size[1] = q->txq_size[1];
@@ -1878,6 +1942,12 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		t.lro = q->lro;
 		t.intr_lat = q->coalesce_usecs;
 		t.cong_thres = q->cong_thres;
+		t.qnum = q1;
+
+		if (adapter->flags & USING_MSIX)
+			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
+		else
+			t.vector = adapter->pdev->irq;
 
 		if (copy_to_user(useraddr, &t, sizeof(t)))
 			return -EFAULT;
@@ -2223,8 +2293,8 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
 {
 	int i;
 
-	for (i = 0; i < p->nqsets; i++) {
-		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
+	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+		struct sge_rspq *q = &adap->sge.qs[i].rspq;
 
 		spin_lock_irq(&q->lock);
 		spin_unlock_irq(&q->lock);
@@ -2581,6 +2651,42 @@ static struct pci_error_handlers t3_err_handler = {
 	.resume = t3_io_resume,
 };
 
+/*
+ * Set the number of qsets based on the number of CPUs and the number of ports,
+ * not to exceed the number of available qsets, assuming there are enough qsets
+ * per port in HW.
+ */
+static void set_nqsets(struct adapter *adap)
+{
+	int i, j = 0;
+	int num_cpus = num_online_cpus();
+	int hwports = adap->params.nports;
+	int nqsets = SGE_QSETS;
+
+	if (adap->params.rev > 0) {
+		if (hwports == 2 &&
+		    (hwports * nqsets > SGE_QSETS ||
+		     num_cpus >= nqsets / hwports))
+			nqsets /= hwports;
+		if (nqsets > num_cpus)
+			nqsets = num_cpus;
+		if (nqsets < 1 || hwports == 4)
+			nqsets = 1;
+	} else
+		nqsets = 1;
+
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->first_qset = j;
+		pi->nqsets = nqsets;
+		j = pi->first_qset + nqsets;
+
+		dev_info(&adap->pdev->dev,
+			 "Port %d using %d queue sets.\n", i, nqsets);
+	}
+}
+
 static int __devinit cxgb_enable_msix(struct adapter *adap)
 {
 	struct msix_entry entries[SGE_QSETS + 1];
@@ -2739,9 +2845,6 @@ static int __devinit init_one(struct pci_dev *pdev,
 		pi = netdev_priv(netdev);
 		pi->adapter = adapter;
 		pi->rx_csum_offload = 1;
-		pi->nqsets = 1;
-		pi->first_qset = i;
-		pi->activity = 0;
 		pi->port_id = i;
 		netif_carrier_off(netdev);
 		netdev->irq = pdev->irq;
@@ -2818,6 +2921,8 @@ static int __devinit init_one(struct pci_dev *pdev,
 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
 		adapter->flags |= USING_MSI;
 
+	set_nqsets(adapter);
+
 	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
 				 &cxgb3_attr_group);
 