author     Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 00:04:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 00:04:44 -0400
commit     f8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree       3706a9cd779859271ca61b85c63a1bc3f82d626e /drivers/net/cxgb4
parent     a26272e5200765691e67d6780e52b32498fdb659 (diff)
parent     2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits)
  qlcnic: adding co maintainer
  ixgbe: add support for active DA cables
  ixgbe: dcb, do not tag tc_prio_control frames
  ixgbe: fix ixgbe_tx_is_paused logic
  ixgbe: always enable vlan strip/insert when DCB is enabled
  ixgbe: remove some redundant code in setting FCoE FIP filter
  ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp
  ixgbe: fix header len when unsplit packet overflows to data buffer
  ipv6: Never schedule DAD timer on dead address
  ipv6: Use POSTDAD state
  ipv6: Use state_lock to protect ifa state
  ipv6: Replace inet6_ifaddr->dead with state
  cxgb4: notify upper drivers if the device is already up when they load
  cxgb4: keep interrupts available when the ports are brought down
  cxgb4: fix initial addition of MAC address
  cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
  cnic: Convert cnic_local_flags to atomic ops.
  can: Fix SJA1000 command register writes on SMP systems
  bridge: fix build for CONFIG_SYSFS disabled
  ARCNET: Limit com20020 PCI ID matches for SOHARD cards
  ...

Fix up various conflicts with pcmcia tree drivers/net/ {pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and wireless/orinoco/spectrum_cs.c} and feature removal (Documentation/feature-removal-schedule.txt).

Also fix a non-content conflict due to pm_qos_requirement getting renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'drivers/net/cxgb4')
-rw-r--r--  drivers/net/cxgb4/cxgb4.h         9
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c  102
-rw-r--r--  drivers/net/cxgb4/sge.c          11
-rw-r--r--  drivers/net/cxgb4/t4_hw.c       117
-rw-r--r--  drivers/net/cxgb4/t4_msg.h        1
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h      4
6 files changed, 139 insertions(+), 105 deletions(-)
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d8ff4889b56..dd1770e075e6 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -53,7 +53,7 @@
 
 enum {
 	MAX_NPORTS = 4,     /* max # of ports */
-	SERNUM_LEN = 16,    /* Serial # length */
+	SERNUM_LEN = 24,    /* Serial # length */
 	EC_LEN     = 16,    /* E/C length */
 	ID_LEN     = 16,    /* ID length */
 };
@@ -477,7 +477,6 @@ struct adapter {
 	struct pci_dev *pdev;
 	struct device *pdev_dev;
 	unsigned long registered_device_map;
-	unsigned long open_device_map;
 	unsigned long flags;
 
 	const char *name;
@@ -651,14 +650,11 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
-		  unsigned int nwords, u32 *data, int byte_oriented);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
 int t4_check_fw_version(struct adapter *adapter);
 int t4_prep_adapter(struct adapter *adapter);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
-void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
 int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
 			int filter_index, int enable);
 void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
@@ -709,7 +705,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int vf, unsigned int viid);
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
-		  int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
+		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
+		  bool sleep_ok);
 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
 		      unsigned int viid, bool free, unsigned int naddr,
 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index a7e30a23d322..58045b00cf40 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -240,9 +240,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
 	u16 filt_idx[7];
 	const u8 *addr[7];
 	int ret, naddr = 0;
-	const struct dev_addr_list *d;
 	const struct netdev_hw_addr *ha;
 	int uc_cnt = netdev_uc_count(dev);
+	int mc_cnt = netdev_mc_count(dev);
 	const struct port_info *pi = netdev_priv(dev);
 
 	/* first do the secondary unicast addresses */
@@ -260,9 +260,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
 	}
 
 	/* next set up the multicast addresses */
-	netdev_for_each_mc_addr(d, dev) {
-		addr[naddr++] = d->dmi_addr;
-		if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
+	netdev_for_each_mc_addr(ha, dev) {
+		addr[naddr++] = ha->addr;
+		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
 					naddr, addr, filt_idx, &mhash, sleep);
 			if (ret < 0)
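The hunk above is the migration from struct dev_addr_list (whose d->next pointer let the loop detect the final entry) to struct netdev_hw_addr: the walk now pre-counts entries with netdev_mc_count() and decrements that count so the last, possibly partial, batch still gets flushed. A minimal sketch of the same batching pattern follows; flush_batch() is a hypothetical stand-in for the hardware filter call (t4_alloc_mac_filt() in the driver), not a real kernel or cxgb4 symbol.

#include <linux/netdevice.h>

static int flush_batch(struct net_device *dev, const u8 **addr, int naddr); /* hypothetical */

static int add_mc_filters(struct net_device *dev)
{
	const struct netdev_hw_addr *ha;
	const u8 *addr[7];
	int naddr = 0, ret;
	int mc_cnt = netdev_mc_count(dev);	/* total list length, known up front */

	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		/* flush when the batch is full or this was the last address */
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = flush_batch(dev, addr, naddr);
			if (ret < 0)
				return ret;
			naddr = 0;
		}
	}
	return 0;
}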
@@ -290,7 +290,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
 	if (ret == 0)
 		ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
-				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
 				    sleep_ok);
 	return ret;
 }
@@ -311,11 +311,11 @@ static int link_start(struct net_device *dev)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
-			    true);
+			    pi->vlan_grp != NULL, true);
 	if (ret == 0) {
 		ret = t4_change_mac(pi->adapter, 0, pi->viid,
 				    pi->xact_addr_filt, dev->dev_addr, true,
-				    false);
+				    true);
 		if (ret >= 0) {
 			pi->xact_addr_filt = ret;
 			ret = 0;
@@ -859,6 +859,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
 	"RxCsumGood ",
 	"VLANextractions ",
 	"VLANinsertions ",
+	"GROpackets ",
+	"GROmerged ",
 };
 
 static int get_sset_count(struct net_device *dev, int sset)
@@ -922,6 +924,8 @@ struct queue_port_stats {
 	u64 rx_csum;
 	u64 vlan_ex;
 	u64 vlan_ins;
+	u64 gro_pkts;
+	u64 gro_merged;
 };
 
 static void collect_sge_port_stats(const struct adapter *adap,
@@ -938,6 +942,8 @@ static void collect_sge_port_stats(const struct adapter *adap,
 		s->rx_csum += rx->stats.rx_cso;
 		s->vlan_ex += rx->stats.vlan_ex;
 		s->vlan_ins += tx->vlan_ins;
+		s->gro_pkts += rx->stats.lro_pkts;
+		s->gro_merged += rx->stats.lro_merged;
 	}
 }
 
@@ -1711,6 +1717,18 @@ static int set_tso(struct net_device *dev, u32 value)
 	return 0;
 }
 
+static int set_flags(struct net_device *dev, u32 flags)
+{
+	if (flags & ~ETH_FLAG_RXHASH)
+		return -EOPNOTSUPP;
+
+	if (flags & ETH_FLAG_RXHASH)
+		dev->features |= NETIF_F_RXHASH;
+	else
+		dev->features &= ~NETIF_F_RXHASH;
+	return 0;
+}
+
 static struct ethtool_ops cxgb_ethtool_ops = {
 	.get_settings = get_settings,
 	.set_settings = set_settings,
@@ -1741,6 +1759,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
 	.get_wol = get_wol,
 	.set_wol = set_wol,
 	.set_tso = set_tso,
+	.set_flags = set_flags,
 	.flash_device = set_flash,
 };
 
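The new set_flags hook and the .set_flags entry just above are what let ethtool toggle NETIF_F_RXHASH at runtime; the receive-path hunks in sge.c further down only copy the hardware RSS hash into skb->rxhash while that feature bit is set. A condensed sketch of the toggle-plus-consumer pattern, drawn from this diff rather than a verbatim copy of the driver (sketch_rx() and its hw_hash argument are illustrative placeholders):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int sketch_set_flags(struct net_device *dev, u32 flags)
{
	if (flags & ~ETH_FLAG_RXHASH)		/* only the RX-hash flag is supported */
		return -EOPNOTSUPP;

	if (flags & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;
	return 0;
}

static void sketch_rx(struct net_device *dev, struct sk_buff *skb, u32 hw_hash)
{
	if (dev->features & NETIF_F_RXHASH)	/* honor the ethtool toggle */
		skb->rxhash = hw_hash;		/* 2.6.35-era field consumed by RPS */
	netif_receive_skb(skb);
}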
@@ -2308,6 +2327,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 		register_netevent_notifier(&cxgb4_netevent_nb);
 		netevent_registered = true;
 	}
+
+	if (adap->flags & FULL_INIT_DONE)
+		ulds[uld].state_change(handle, CXGB4_STATE_UP);
 }
 
 static void attach_ulds(struct adapter *adap)
@@ -2414,23 +2436,17 @@ EXPORT_SYMBOL(cxgb4_unregister_uld);
  */
 static int cxgb_up(struct adapter *adap)
 {
-	int err = 0;
+	int err;
 
-	if (!(adap->flags & FULL_INIT_DONE)) {
-		err = setup_sge_queues(adap);
-		if (err)
-			goto out;
-		err = setup_rss(adap);
-		if (err) {
-			t4_free_sge_resources(adap);
-			goto out;
-		}
-		if (adap->flags & USING_MSIX)
-			name_msix_vecs(adap);
-		adap->flags |= FULL_INIT_DONE;
-	}
+	err = setup_sge_queues(adap);
+	if (err)
+		goto out;
+	err = setup_rss(adap);
+	if (err)
+		goto freeq;
 
 	if (adap->flags & USING_MSIX) {
+		name_msix_vecs(adap);
 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
 				  adap->msix_info[0].desc, adap);
 		if (err)
@@ -2451,11 +2467,14 @@ static int cxgb_up(struct adapter *adap)
 	enable_rx(adap);
 	t4_sge_start(adap);
 	t4_intr_enable(adap);
+	adap->flags |= FULL_INIT_DONE;
 	notify_ulds(adap, CXGB4_STATE_UP);
  out:
 	return err;
  irq_err:
 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ freeq:
+	t4_free_sge_resources(adap);
 	goto out;
 }
 
@@ -2471,6 +2490,9 @@ static void cxgb_down(struct adapter *adapter)
 	} else
 		free_irq(adapter->pdev->irq, adapter);
 	quiesce_rx(adapter);
+	t4_sge_stop(adapter);
+	t4_free_sge_resources(adapter);
+	adapter->flags &= ~FULL_INIT_DONE;
 }
 
 /*
@@ -2482,11 +2504,13 @@ static int cxgb_open(struct net_device *dev)
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
-	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
-		return err;
+	if (!(adapter->flags & FULL_INIT_DONE)) {
+		err = cxgb_up(adapter);
+		if (err < 0)
+			return err;
+	}
 
 	dev->real_num_tx_queues = pi->nqsets;
-	set_bit(pi->tx_chan, &adapter->open_device_map);
 	link_start(dev);
 	netif_tx_start_all_queues(dev);
 	return 0;
@@ -2494,19 +2518,12 @@ static int cxgb_open(struct net_device *dev)
 
 static int cxgb_close(struct net_device *dev)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
-
-	clear_bit(pi->tx_chan, &adapter->open_device_map);
-
-	if (!adapter->open_device_map)
-		cxgb_down(adapter);
-	return 0;
+	return t4_enable_vi(adapter, 0, pi->viid, false, false);
 }
 
 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
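After this pair of hunks the SGE queues, MSI-X vectors and the FULL_INIT_DONE flag live from the first cxgb_open() until remove_one(); cxgb_close() merely disables the virtual interface. As a reminder of where these handlers plug in, here is a minimal net_device_ops wiring sketch, with prototypes repeated so the fragment stands alone (the driver's real table in cxgb4_main.c carries many more callbacks):

#include <linux/netdevice.h>

static int cxgb_open(struct net_device *dev);
static int cxgb_close(struct net_device *dev);

static const struct net_device_ops sketch_netdev_ops = {
	.ndo_open = cxgb_open,	/* first open runs cxgb_up() and sets FULL_INIT_DONE */
	.ndo_stop = cxgb_close,	/* only quiesces the port; queues and IRQs stay allocated */
};
/* assigned during probe, e.g. netdev->netdev_ops = &sketch_netdev_ops; */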
@@ -2601,7 +2618,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 
 	if (new_mtu < 81 || new_mtu > MAX_MTU)		/* accommodate SACK */
 		return -EINVAL;
-	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
+	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
 			    true);
 	if (!ret)
 		dev->mtu = new_mtu;
@@ -2632,7 +2649,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 	struct port_info *pi = netdev_priv(dev);
 
 	pi->vlan_grp = grp;
-	t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
+	t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
+		      true);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3066,6 +3084,12 @@ static void __devinit print_port_info(struct adapter *adap)
 
 	int i;
 	char buf[80];
+	const char *spd = "";
+
+	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+		spd = " 2.5 GT/s";
+	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
+		spd = " 5 GT/s";
 
 	for_each_port(adap, i) {
 		struct net_device *dev = adap->port[i];
@@ -3085,10 +3109,10 @@ static void __devinit print_port_info(struct adapter *adap)
 			--bufp;
 		sprintf(bufp, "BASE-%s", base[pi->port_type]);
 
-		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
+		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
 			    adap->params.vpd.id, adap->params.rev,
 			    buf, is_offload(adap) ? "R" : "",
-			    adap->params.pci.width,
+			    adap->params.pci.width, spd,
 			    (adap->flags & USING_MSIX) ? " MSI-X" :
 			    (adap->flags & USING_MSI) ? " MSI" : "");
 		if (adap->name == dev->name)
@@ -3203,7 +3227,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 		netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
 		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-		netdev->features |= NETIF_F_GRO | highdma;
+		netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
 		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 		netdev->vlan_features = netdev->features & VLAN_FEAT;
 
@@ -3334,8 +3358,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
 		if (adapter->debugfs_root)
 			debugfs_remove_recursive(adapter->debugfs_root);
 
-		t4_sge_stop(adapter);
-		t4_free_sge_resources(adapter);
+		if (adapter->flags & FULL_INIT_DONE)
+			cxgb_down(adapter);
 		t4_free_mem(adapter->l2t);
 		t4_free_mem(adapter->tids.tid_tab);
 		disable_msi(adapter);
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 14adc58e71c3..d1f8f225e45a 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1471,7 +1471,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
  *	Releases the pages of a packet gather list.  We do not own the last
  *	page on the list and do not free it.
  */
-void t4_pktgl_free(const struct pkt_gl *gl)
+static void t4_pktgl_free(const struct pkt_gl *gl)
 {
 	int n;
 	const skb_frag_t *p;
@@ -1524,6 +1524,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rxq->rspq.idx);
+	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
 
 	if (unlikely(pkt->vlan_ex)) {
 		struct port_info *pi = netdev_priv(rxq->rspq.netdev);
@@ -1565,7 +1567,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
 		return handle_trace_pkt(q->adap, si);
 
-	pkt = (void *)&rsp[1];
+	pkt = (const struct cpl_rx_pkt *)rsp;
 	csum_ok = pkt->csum_calc && !pkt->err_vec;
 	if ((pkt->l2info & htonl(RXF_TCP)) &&
 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -1583,6 +1585,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
 	skb->protocol = eth_type_trans(skb, q->netdev);
 	skb_record_rx_queue(skb, q->idx);
+	if (skb->dev->features & NETIF_F_RXHASH)
+		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
 	pi = netdev_priv(skb->dev);
 	rxq->stats.pkts++;
 
@@ -2047,7 +2052,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	adap->sge.ingr_map[iq->cntxt_id] = iq;
 
 	if (fl) {
-		fl->cntxt_id = htons(c.fl0id);
+		fl->cntxt_id = ntohs(c.fl0id);
 		fl->avail = fl->pend_cred = 0;
 		fl->pidx = fl->cidx = 0;
 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index a814a3afe123..da272a98fdbc 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -53,8 +53,8 @@
  *	at the time it indicated completion is stored there.  Returns 0 if the
  *	operation completes and -EAGAIN otherwise.
  */
-int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
 			int polarity, int attempts, int delay, u32 *valp)
 {
 	while (1) {
 		u32 val = t4_read_reg(adapter, reg);
@@ -109,9 +109,9 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
  *	Reads registers that are accessed indirectly through an address/data
  *	register pair.
  */
-void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
-		      unsigned int data_reg, u32 *vals, unsigned int nregs,
-		      unsigned int start_idx)
+static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+			     unsigned int data_reg, u32 *vals,
+			     unsigned int nregs, unsigned int start_idx)
 {
 	while (nregs--) {
 		t4_write_reg(adap, addr_reg, start_idx);
@@ -120,6 +120,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
 	}
 }
 
+#if 0
 /**
  *	t4_write_indirect - write indirectly addressed registers
  *	@adap: the adapter
@@ -132,15 +133,16 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
  *	Writes a sequential block of registers that are accessed indirectly
  *	through an address/data register pair.
  */
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
 		       unsigned int data_reg, const u32 *vals,
 		       unsigned int nregs, unsigned int start_idx)
 {
 	while (nregs--) {
 		t4_write_reg(adap, addr_reg, start_idx++);
 		t4_write_reg(adap, data_reg, *vals++);
 	}
 }
+#endif
 
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
@@ -345,33 +347,21 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
 	return 0;
 }
 
-#define VPD_ENTRY(name, len) \
-	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
-
 /*
  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
- * VPD-R sections.
+ * VPD-R header.
  */
-struct t4_vpd {
+struct t4_vpd_hdr {
 	u8  id_tag;
 	u8  id_len[2];
 	u8  id_data[ID_LEN];
 	u8  vpdr_tag;
 	u8  vpdr_len[2];
-	VPD_ENTRY(pn, 16);               /* part number */
-	VPD_ENTRY(ec, EC_LEN);           /* EC level */
-	VPD_ENTRY(sn, SERNUM_LEN);       /* serial number */
-	VPD_ENTRY(na, 12);               /* MAC address base */
-	VPD_ENTRY(port_type, 8);         /* port types */
-	VPD_ENTRY(gpio, 14);             /* GPIO usage */
-	VPD_ENTRY(cclk, 6);              /* core clock */
-	VPD_ENTRY(port_addr, 8);         /* port MDIO addresses */
-	VPD_ENTRY(rv, 1);                /* csum */
-	u32 pad;              /* for multiple-of-4 sizing and alignment */
 };
 
 #define EEPROM_STAT_ADDR   0x7bfc
 #define VPD_BASE           0
+#define VPD_LEN            512
 
 /**
  *	t4_seeprom_wp - enable/disable EEPROM write protection
@@ -396,16 +386,36 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
  */
 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
-	int ret;
-	struct t4_vpd vpd;
-	u8 *q = (u8 *)&vpd, csum;
+	int i, ret;
+	int ec, sn, v2;
+	u8 vpd[VPD_LEN], csum;
+	unsigned int vpdr_len;
+	const struct t4_vpd_hdr *v;
 
-	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
+	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
 	if (ret < 0)
 		return ret;
 
-	for (csum = 0; q <= vpd.rv_data; q++)
-		csum += *q;
+	v = (const struct t4_vpd_hdr *)vpd;
+	vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
+	if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
+		return -EINVAL;
+	}
+
+#define FIND_VPD_KW(var, name) do { \
+	var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
+					vpdr_len, name); \
+	if (var < 0) { \
+		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
+		return -EINVAL; \
+	} \
+	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
+} while (0)
+
+	FIND_VPD_KW(i, "RV");
+	for (csum = 0; i >= 0; i--)
+		csum += vpd[i];
 
 	if (csum) {
 		dev_err(adapter->pdev_dev,
@@ -413,12 +423,18 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 		return -EINVAL;
 	}
 
-	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
-	memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
+	FIND_VPD_KW(ec, "EC");
+	FIND_VPD_KW(sn, "SN");
+	FIND_VPD_KW(v2, "V2");
+#undef FIND_VPD_KW
+
+	p->cclk = simple_strtoul(vpd + v2, NULL, 10);
+	memcpy(p->id, v->id_data, ID_LEN);
 	strim(p->id);
-	memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
+	memcpy(p->ec, vpd + ec, EC_LEN);
 	strim(p->ec);
-	memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
+	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
+	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
 	strim(p->sn);
 	return 0;
 }
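get_vpd_params() now walks the VPD image with the generic PCI helpers instead of casting it onto a fixed VPD_ENTRY layout: pci_vpd_lrdt_size() yields the length of the VPD-R section and pci_vpd_find_info_keyword() returns the offset of a two-letter keyword inside it. Below is a stripped-down sketch of reading one keyword ("SN") the same way, under the same fixed-header assumption the patch makes; read_serial() and struct vpd_hdr are illustrative names, not driver symbols.

#include <linux/pci.h>
#include <linux/string.h>

struct vpd_hdr {			/* same shape as t4_vpd_hdr above */
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
};

static int read_serial(struct pci_dev *pdev, char *sn, int sn_size)
{
	u8 vpd[512];			/* mirrors VPD_LEN from the patch */
	const struct vpd_hdr *v = (const struct vpd_hdr *)vpd;
	int i, len;
	ssize_t ret;

	ret = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	/* keyword search is confined to the VPD-R payload */
	i = pci_vpd_find_info_keyword(vpd, sizeof(*v),
				      pci_vpd_lrdt_size(&v->vpdr_tag), "SN");
	if (i < 0)
		return -ENOENT;

	len = pci_vpd_info_field_size(vpd + i);	/* payload length of "SN" */
	i += PCI_VPD_INFO_FLD_HDR_SIZE;		/* skip the keyword+length header */
	memcpy(sn, vpd + i, min(len, sn_size));
	return 0;
}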
@@ -537,8 +553,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
  *	natural endianess.
  */
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
+static int t4_read_flash(struct adapter *adapter, unsigned int addr,
 		  unsigned int nwords, u32 *data, int byte_oriented)
 {
 	int ret;
 
@@ -870,22 +886,6 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
-/**
- *	t4_set_vlan_accel - configure HW VLAN extraction
- *	@adap: the adapter
- *	@ports: bitmap of adapter ports to operate on
- *	@on: enable (1) or disable (0) HW VLAN extraction
- *
- *	Enables or disables HW extraction of VLAN tags for the ports specified
- *	by @ports.  @ports is a bitmap with the ith bit designating the port
- *	associated with the ith adapter channel.
- */
-void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
-{
-	ports <<= VLANEXTENABLE_SHIFT;
-	t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
-}
-
 struct intr_info {
 	unsigned int mask;       /* bits to check in interrupt status */
 	const char *msg;         /* message to print or NULL */
@@ -2608,12 +2608,14 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
  *	@sleep_ok: if true we may sleep while awaiting command completion
  *
  *	Sets Rx properties of a virtual interface.
  */
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
-		  int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
+		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
+		  bool sleep_ok)
 {
 	struct fw_vi_rxmode_cmd c;
 
@@ -2626,15 +2628,18 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
 	if (bcast < 0)
 		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+	if (vlanex < 0)
+		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
 
 	memset(&c, 0, sizeof(c));
 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
 			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
 	c.retval_len16 = htonl(FW_LEN16(c));
-	c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
 				     FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
 				     FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
-				     FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
+				     FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+				     FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
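Every Rx-mode argument, including the new vlanex, keeps the driver's -1 convention: a negative value is rewritten just above to the field's all-ones mask, which the firmware interprets as "leave unchanged". That is what lets the call sites patched in cxgb4_main.c touch a single property at a time. A small sketch of the VLAN-extraction case; set_vlan_extraction() is an illustrative wrapper, the call itself mirrors vlan_rx_register():

#include "cxgb4.h"		/* struct port_info and the t4_set_rxmode() prototype */

static void set_vlan_extraction(struct port_info *pi, bool on)
{
	/* mtu, promisc, all_multi and bcast are all -1: leave them as they are */
	t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, on, true);
}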
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index fdb117443144..7a981b81afaf 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -503,6 +503,7 @@ struct cpl_rx_data_ack {
 };
 
 struct cpl_rx_pkt {
+	struct rss_header rsshdr;
 	u8 opcode;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	u8 iff:4;
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 3393d05a388a..63991d68950e 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -876,7 +876,7 @@ struct fw_vi_mac_cmd {
 struct fw_vi_rxmode_cmd {
 	__be32 op_to_viid;
 	__be32 retval_len16;
-	__be32 mtu_to_broadcasten;
+	__be32 mtu_to_vlanexen;
 	__be32 r4_lo;
 };
 
@@ -888,6 +888,8 @@ struct fw_vi_rxmode_cmd {
 #define FW_VI_RXMODE_CMD_ALLMULTIEN(x)	((x) << 12)
 #define FW_VI_RXMODE_CMD_BROADCASTEN_MASK	0x3
 #define FW_VI_RXMODE_CMD_BROADCASTEN(x)	((x) << 10)
+#define FW_VI_RXMODE_CMD_VLANEXEN_MASK	0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN(x)	((x) << 8)
 
 struct fw_vi_enable_cmd {
 	__be32 op_to_viid;