Diffstat (limited to 'drivers/net/cxgb3/cxgb3_main.c')
-rw-r--r--	drivers/net/cxgb3/cxgb3_main.c	152
1 file changed, 88 insertions(+), 64 deletions(-)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 17858b9a5830..538dda4422dc 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -37,7 +37,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
-#include <linux/mii.h>
+#include <linux/mdio.h>
 #include <linux/sockios.h>
 #include <linux/workqueue.h>
 #include <linux/proc_fs.h>
@@ -91,6 +91,8 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
 	CH_DEVICE(0x31, 3),	/* T3B20 */
 	CH_DEVICE(0x32, 1),	/* T3B02 */
 	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
+	CH_DEVICE(0x36, 3),	/* S320E-CR */
+	CH_DEVICE(0x37, 7),	/* N320E-G2 */
 	{0,}
 };
 
@@ -431,40 +433,78 @@ static int init_tp_parity(struct adapter *adap)
 	for (i = 0; i < 16; i++) {
 		struct cpl_smt_write_req *req;
 
-		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
 		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
 		memset(req, 0, sizeof(*req));
 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
 		req->iff = i;
 		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
 	}
 
 	for (i = 0; i < 2048; i++) {
 		struct cpl_l2t_write_req *req;
 
-		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
 		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
 		memset(req, 0, sizeof(*req));
 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
 		req->params = htonl(V_L2T_W_IDX(i));
 		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, 16 + i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
 	}
 
 	for (i = 0; i < 2048; i++) {
 		struct cpl_rte_write_req *req;
 
-		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
 		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
 		memset(req, 0, sizeof(*req));
 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
 		req->l2t_idx = htonl(V_L2T_W_IDX(i));
 		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
 	}
 
-	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+	if (!skb)
+		skb = adap->nofail_skb;
+	if (!skb)
+		goto alloc_skb_fail;
+
 	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
 	memset(greq, 0, sizeof(*greq));
 	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
@@ -473,8 +513,17 @@ static int init_tp_parity(struct adapter *adap)
 	t3_mgmt_tx(adap, skb);
 
 	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+	if (skb == adap->nofail_skb) {
+		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+	}
+
 	t3_tp_set_offload_mode(adap, 0);
 	return i;
+
+alloc_skb_fail:
+	t3_tp_set_offload_mode(adap, 0);
+	return -ENOMEM;
 }
 
 /**
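
The hunks above replace __GFP_NOFAIL with a preallocated emergency skb (adap->nofail_skb): each management write first tries a normal GFP_KERNEL allocation, falls back to the emergency buffer, and restocks that buffer once its reply has been drained. A minimal sketch of the fallback, pulled out of the SMT loop above; the helper name send_one_smt_write() is illustrative only and does not exist in the driver:

static int send_one_smt_write(struct adapter *adap, unsigned int cnt, int i)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb;

	/* Preferred path: a fresh skb that the TX path frees as usual. */
	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;	/* emergency buffer set up in init_one() */
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
	req->iff = i;
	t3_mgmt_tx(adap, skb);

	if (skb == adap->nofail_skb) {
		/* The emergency skb is in flight: wait for its reply before
		 * it can be reused, then try to restock it for the next user. */
		await_mgmt_replies(adap, cnt, i + 1);
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			return -ENOMEM;
	}
	return 0;
}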
@@ -869,7 +918,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 	struct mngt_pktsched_wr *req;
 	int ret;
 
-	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		skb = adap->nofail_skb;
+	if (!skb)
+		return -ENOMEM;
+
 	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
@@ -879,6 +933,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 	req->max = hi;
 	req->binding = port;
 	ret = t3_mgmt_tx(adap, skb);
+	if (skb == adap->nofail_skb) {
+		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					     GFP_KERNEL);
+		if (!adap->nofail_skb)
+			ret = -ENOMEM;
+	}
 
 	return ret;
 }
@@ -1593,7 +1653,7 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	}
 
 	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
-	cmd->phy_address = p->phy.addr;
+	cmd->phy_address = p->phy.mdio.prtad;
 	cmd->transceiver = XCVR_EXTERNAL;
 	cmd->autoneg = p->link_config.autoneg;
 	cmd->maxtxpkt = 0;
@@ -2308,70 +2368,25 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 	struct mii_ioctl_data *data = if_mii(req);
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
-	int ret, mmd;
 
 	switch (cmd) {
-	case SIOCGMIIPHY:
-		data->phy_id = pi->phy.addr;
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		/* Convert phy_id from older PRTAD/DEVAD format */
+		if (is_10G(adapter) &&
+		    !mdio_phy_id_is_c45(data->phy_id) &&
+		    (data->phy_id & 0x1f00) &&
+		    !(data->phy_id & 0xe0e0))
+			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
+						       data->phy_id & 0x1f);
 		/* FALLTHRU */
-	case SIOCGMIIREG:{
-			u32 val;
-			struct cphy *phy = &pi->phy;
-
-			if (!phy->mdio_read)
-				return -EOPNOTSUPP;
-			if (is_10G(adapter)) {
-				mmd = data->phy_id >> 8;
-				if (!mmd)
-					mmd = MDIO_DEV_PCS;
-				else if (mmd > MDIO_DEV_VEND2)
-					return -EINVAL;
-
-				ret =
-				    phy->mdio_read(adapter, data->phy_id & 0x1f,
-						   mmd, data->reg_num, &val);
-			} else
-				ret =
-				    phy->mdio_read(adapter, data->phy_id & 0x1f,
-						   0, data->reg_num & 0x1f,
-						   &val);
-			if (!ret)
-				data->val_out = val;
-			break;
-		}
-	case SIOCSMIIREG:{
-			struct cphy *phy = &pi->phy;
-
-			if (!capable(CAP_NET_ADMIN))
-				return -EPERM;
-			if (!phy->mdio_write)
-				return -EOPNOTSUPP;
-			if (is_10G(adapter)) {
-				mmd = data->phy_id >> 8;
-				if (!mmd)
-					mmd = MDIO_DEV_PCS;
-				else if (mmd > MDIO_DEV_VEND2)
-					return -EINVAL;
-
-				ret =
-				    phy->mdio_write(adapter,
-						    data->phy_id & 0x1f, mmd,
-						    data->reg_num,
-						    data->val_in);
-			} else
-				ret =
-				    phy->mdio_write(adapter,
-						    data->phy_id & 0x1f, 0,
-						    data->reg_num & 0x1f,
-						    data->val_in);
-			break;
-		}
+	case SIOCGMIIPHY:
+		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
 	case SIOCCHIOCTL:
 		return cxgb_extension_ioctl(dev, req->ifr_data);
 	default:
 		return -EOPNOTSUPP;
 	}
-	return ret;
 }
 
 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
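
With the conversion to the generic MDIO layer, the open-coded clause-22/clause-45 register access is dropped and the MII ioctls are forwarded to mdio_mii_ioctl() from <linux/mdio.h>; only legacy phy_ids that still carry a PRTAD/DEVAD packing are rewritten first. A condensed sketch of the resulting handler (the standalone name cxgb_mii_ioctl() is for illustration; in the driver this logic stays inside cxgb_ioctl()):

static int cxgb_mii_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Rewrite phy_ids from the older PRTAD/DEVAD packing into the
		 * generic clause-45 encoding expected by mdio_mii_ioctl(). */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		/* Permission checks and the register access itself are
		 * handled by the common helper. */
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	default:
		return -EOPNOTSUPP;
	}
}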
@@ -3063,6 +3078,14 @@ static int __devinit init_one(struct pci_dev *pdev,
 		goto out_disable_device;
 	}
 
+	adapter->nofail_skb =
+		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
+	if (!adapter->nofail_skb) {
+		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
 	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
 	if (!adapter->regs) {
 		dev_err(&pdev->dev, "cannot map device registers\n");
@@ -3106,7 +3129,6 @@ static int __devinit init_one(struct pci_dev *pdev,
 	netdev->mem_start = mmio_start;
 	netdev->mem_end = mmio_start + mmio_len - 1;
 	netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
-	netdev->features |= NETIF_F_LLTX;
 	netdev->features |= NETIF_F_GRO;
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
@@ -3220,6 +3242,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
 			free_netdev(adapter->port[i]);
 
 		iounmap(adapter->regs);
+		if (adapter->nofail_skb)
+			kfree_skb(adapter->nofail_skb);
 		kfree(adapter);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
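
The emergency buffer is owned by the adapter: init_one() allocates it once at probe time, sized for a cpl_set_tcb_field request to match what the replenish paths above allocate, and remove_one() frees whatever is left of it at teardown. A condensed view of the two endpoints from the hunks above; the surrounding probe and remove code is omitted:

	/* init_one(): set up the emergency skb before the adapter is used. */
	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* remove_one(): the buffer may have been consumed and not restocked,
	 * so it can legitimately be NULL here. */
	if (adapter->nofail_skb)
		kfree_skb(adapter->nofail_skb);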