about summary refs log tree commit diff stats
path: root/drivers/net/cxgb3/cxgb3_main.c
diff options
context:
space:
mode:
author	Divy Le Ray <divy@chelsio.com>	2009-06-09 19:25:21 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-11 05:47:13 -0400
commit	74b793e1ef79edc49bc031a88d62f1e93fc6b30f (patch)
tree	dc32a12c1883f882f524e6a79561787aa36cc9ec /drivers/net/cxgb3/cxgb3_main.c
parent	87433bfc75f34599c38137e172b6bf8fd41971ba (diff)
cxgb3: remove __GFP_NOFAIL usage
Pre-allocate a skb at init time to be used for control messages to the HW
if skb allocation fails.

Tolerate failures to send messages initializing some memories at the cost
of parity error detection for these memories.

Retry sending connection id release messages if both alloc_skb(GFP_ATOMIC)
and alloc_skb(GFP_KERNEL) fail.

Do not bring the interface up if messages binding queue set to port fail
to be sent.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/cxgb3/cxgb3_main.c')
-rw-r--r--	drivers/net/cxgb3/cxgb3_main.c	78
1 file changed, 73 insertions, 5 deletions
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index aef3ab21f5f7..538dda4422dc 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -433,40 +433,78 @@ static int init_tp_parity(struct adapter *adap)
433 for (i = 0; i < 16; i++) { 433 for (i = 0; i < 16; i++) {
434 struct cpl_smt_write_req *req; 434 struct cpl_smt_write_req *req;
435 435
436 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 436 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
437 if (!skb)
438 skb = adap->nofail_skb;
439 if (!skb)
440 goto alloc_skb_fail;
441
437 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); 442 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
438 memset(req, 0, sizeof(*req)); 443 memset(req, 0, sizeof(*req));
439 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 444 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
440 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); 445 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
441 req->iff = i; 446 req->iff = i;
442 t3_mgmt_tx(adap, skb); 447 t3_mgmt_tx(adap, skb);
448 if (skb == adap->nofail_skb) {
449 await_mgmt_replies(adap, cnt, i + 1);
450 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
451 if (!adap->nofail_skb)
452 goto alloc_skb_fail;
453 }
443 } 454 }
444 455
445 for (i = 0; i < 2048; i++) { 456 for (i = 0; i < 2048; i++) {
446 struct cpl_l2t_write_req *req; 457 struct cpl_l2t_write_req *req;
447 458
448 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 459 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
460 if (!skb)
461 skb = adap->nofail_skb;
462 if (!skb)
463 goto alloc_skb_fail;
464
449 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); 465 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
450 memset(req, 0, sizeof(*req)); 466 memset(req, 0, sizeof(*req));
451 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 467 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
452 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); 468 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
453 req->params = htonl(V_L2T_W_IDX(i)); 469 req->params = htonl(V_L2T_W_IDX(i));
454 t3_mgmt_tx(adap, skb); 470 t3_mgmt_tx(adap, skb);
471 if (skb == adap->nofail_skb) {
472 await_mgmt_replies(adap, cnt, 16 + i + 1);
473 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
474 if (!adap->nofail_skb)
475 goto alloc_skb_fail;
476 }
455 } 477 }
456 478
457 for (i = 0; i < 2048; i++) { 479 for (i = 0; i < 2048; i++) {
458 struct cpl_rte_write_req *req; 480 struct cpl_rte_write_req *req;
459 481
460 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 482 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
483 if (!skb)
484 skb = adap->nofail_skb;
485 if (!skb)
486 goto alloc_skb_fail;
487
461 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req)); 488 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
462 memset(req, 0, sizeof(*req)); 489 memset(req, 0, sizeof(*req));
463 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 490 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
464 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); 491 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
465 req->l2t_idx = htonl(V_L2T_W_IDX(i)); 492 req->l2t_idx = htonl(V_L2T_W_IDX(i));
466 t3_mgmt_tx(adap, skb); 493 t3_mgmt_tx(adap, skb);
494 if (skb == adap->nofail_skb) {
495 await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
496 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
497 if (!adap->nofail_skb)
498 goto alloc_skb_fail;
499 }
467 } 500 }
468 501
469 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL); 502 skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
503 if (!skb)
504 skb = adap->nofail_skb;
505 if (!skb)
506 goto alloc_skb_fail;
507
470 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq)); 508 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
471 memset(greq, 0, sizeof(*greq)); 509 memset(greq, 0, sizeof(*greq));
472 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 510 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
@@ -475,8 +513,17 @@ static int init_tp_parity(struct adapter *adap)
475 t3_mgmt_tx(adap, skb); 513 t3_mgmt_tx(adap, skb);
476 514
477 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); 515 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
516 if (skb == adap->nofail_skb) {
517 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
518 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
519 }
520
478 t3_tp_set_offload_mode(adap, 0); 521 t3_tp_set_offload_mode(adap, 0);
479 return i; 522 return i;
523
524alloc_skb_fail:
525 t3_tp_set_offload_mode(adap, 0);
526 return -ENOMEM;
480} 527}
481 528
482/** 529/**
@@ -871,7 +918,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
871 struct mngt_pktsched_wr *req; 918 struct mngt_pktsched_wr *req;
872 int ret; 919 int ret;
873 920
874 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 921 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
922 if (!skb)
923 skb = adap->nofail_skb;
924 if (!skb)
925 return -ENOMEM;
926
875 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); 927 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
876 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); 928 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
877 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; 929 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
@@ -881,6 +933,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
881 req->max = hi; 933 req->max = hi;
882 req->binding = port; 934 req->binding = port;
883 ret = t3_mgmt_tx(adap, skb); 935 ret = t3_mgmt_tx(adap, skb);
936 if (skb == adap->nofail_skb) {
937 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
938 GFP_KERNEL);
939 if (!adap->nofail_skb)
940 ret = -ENOMEM;
941 }
884 942
885 return ret; 943 return ret;
886} 944}
@@ -3020,6 +3078,14 @@ static int __devinit init_one(struct pci_dev *pdev,
3020 goto out_disable_device; 3078 goto out_disable_device;
3021 } 3079 }
3022 3080
3081 adapter->nofail_skb =
3082 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3083 if (!adapter->nofail_skb) {
3084 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3085 err = -ENOMEM;
3086 goto out_free_adapter;
3087 }
3088
3023 adapter->regs = ioremap_nocache(mmio_start, mmio_len); 3089 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3024 if (!adapter->regs) { 3090 if (!adapter->regs) {
3025 dev_err(&pdev->dev, "cannot map device registers\n"); 3091 dev_err(&pdev->dev, "cannot map device registers\n");
@@ -3176,6 +3242,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
3176 free_netdev(adapter->port[i]); 3242 free_netdev(adapter->port[i]);
3177 3243
3178 iounmap(adapter->regs); 3244 iounmap(adapter->regs);
3245 if (adapter->nofail_skb)
3246 kfree_skb(adapter->nofail_skb);
3179 kfree(adapter); 3247 kfree(adapter);
3180 pci_release_regions(pdev); 3248 pci_release_regions(pdev);
3181 pci_disable_device(pdev); 3249 pci_disable_device(pdev);