author	Divy Le Ray <divy@chelsio.com>	2009-06-09 19:25:21 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-11 05:47:13 -0400
commit	74b793e1ef79edc49bc031a88d62f1e93fc6b30f
tree	dc32a12c1883f882f524e6a79561787aa36cc9ec
parent	87433bfc75f34599c38137e172b6bf8fd41971ba
cxgb3: remove __GFP_NOFAIL usage

Pre-allocate a skb at init time to be used for control messages to the HW
if skb allocation fails.
Tolerate failures to send messages initializing some memories at the cost of
parity error detection for these memories.
Retry sending connection id release messages if both alloc_skb(GFP_ATOMIC)
and alloc_skb(GFP_KERNEL) fail.
Do not bring the interface up if messages binding queue sets to ports fail
to be sent.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
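The core of the change is a pre-allocated fallback buffer: one control-message skb is set aside at probe time, consumed only when a regular allocation fails, and refilled as soon as possible afterwards. Below is a minimal standalone sketch of that pattern; the names hw_dev, hw_ctrl_msg, hw_queue_xmit and ctrl_msg_send are hypothetical stand-ins, not the driver's API.

```c
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical control-message layout; stands in for the driver's
 * cpl_* request structures. */
struct hw_ctrl_msg {
	__be32 opcode;
	__be32 param;
};

struct hw_dev {
	struct sk_buff *nofail_skb;	/* reserve skb, allocated at probe */
};

/* Stub transmit hook; the real driver hands the skb to t3_mgmt_tx(). */
static void hw_queue_xmit(struct hw_dev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

/*
 * Sketch of the fallback pattern: try a normal allocation, dip into
 * the pre-allocated reserve on failure, and refill the reserve right
 * after it has been consumed.
 */
static int ctrl_msg_send(struct hw_dev *dev, u32 opcode, u32 param)
{
	struct hw_ctrl_msg *msg;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*msg), GFP_KERNEL);
	if (!skb)
		skb = dev->nofail_skb;		/* consume the reserve */
	if (!skb)
		return -ENOMEM;			/* reserve is gone too */

	msg = (struct hw_ctrl_msg *)__skb_put(skb, sizeof(*msg));
	msg->opcode = cpu_to_be32(opcode);
	msg->param = cpu_to_be32(param);
	hw_queue_xmit(dev, skb);

	/* Refill the reserve before the next sender can need it. */
	if (skb == dev->nofail_skb) {
		dev->nofail_skb = alloc_skb(sizeof(*msg), GFP_KERNEL);
		if (!dev->nofail_skb)
			return -ENOMEM;
	}
	return 0;
}
```

In the patch itself, init_one() fills the reserve during probe, remove_one() frees whatever is left, and each send path (init_tp_parity(), send_pktsched_cmd(), the tid release worker) refills it after use.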
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/cxgb3/adapter.h	 2
-rw-r--r--	drivers/net/cxgb3/cxgb3_main.c	78
-rw-r--r--	drivers/net/cxgb3/cxgb3_offload.c	27
-rw-r--r--	drivers/net/cxgb3/cxgb3_offload.h	 3
4 files changed, 103 insertions, 7 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index e48e508b9632..1694fad38720 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -253,6 +253,8 @@ struct adapter {
 	struct mutex mdio_lock;
 	spinlock_t stats_lock;
 	spinlock_t work_lock;
+
+	struct sk_buff *nofail_skb;
 };
 
 static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index aef3ab21f5f7..538dda4422dc 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -433,40 +433,78 @@ static int init_tp_parity(struct adapter *adap)
 	for (i = 0; i < 16; i++) {
 		struct cpl_smt_write_req *req;
 
-		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
 		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
 		memset(req, 0, sizeof(*req));
 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
 		req->iff = i;
 		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
 	}
 
 	for (i = 0; i < 2048; i++) {
 		struct cpl_l2t_write_req *req;
 
-		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
 		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
 		memset(req, 0, sizeof(*req));
 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
 		req->params = htonl(V_L2T_W_IDX(i));
 		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, 16 + i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
 	}
 
 	for (i = 0; i < 2048; i++) {
 		struct cpl_rte_write_req *req;
 
-		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
 		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
 		memset(req, 0, sizeof(*req));
 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
 		req->l2t_idx = htonl(V_L2T_W_IDX(i));
 		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
 	}
 
-	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+	if (!skb)
+		skb = adap->nofail_skb;
+	if (!skb)
+		goto alloc_skb_fail;
+
 	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
 	memset(greq, 0, sizeof(*greq));
 	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
@@ -475,8 +513,17 @@ static int init_tp_parity(struct adapter *adap)
 	t3_mgmt_tx(adap, skb);
 
 	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+	if (skb == adap->nofail_skb) {
+		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+	}
+
 	t3_tp_set_offload_mode(adap, 0);
 	return i;
+
+alloc_skb_fail:
+	t3_tp_set_offload_mode(adap, 0);
+	return -ENOMEM;
 }
 
 /**
@@ -871,7 +918,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 	struct mngt_pktsched_wr *req;
 	int ret;
 
-	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		skb = adap->nofail_skb;
+	if (!skb)
+		return -ENOMEM;
+
 	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
@@ -881,6 +933,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 	req->max = hi;
 	req->binding = port;
 	ret = t3_mgmt_tx(adap, skb);
+	if (skb == adap->nofail_skb) {
+		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					     GFP_KERNEL);
+		if (!adap->nofail_skb)
+			ret = -ENOMEM;
+	}
 
 	return ret;
 }
@@ -3020,6 +3078,14 @@ static int __devinit init_one(struct pci_dev *pdev,
 		goto out_disable_device;
 	}
 
+	adapter->nofail_skb =
+		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
+	if (!adapter->nofail_skb) {
+		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
 	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
 	if (!adapter->regs) {
 		dev_err(&pdev->dev, "cannot map device registers\n");
@@ -3176,6 +3242,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
 		free_netdev(adapter->port[i]);
 
 	iounmap(adapter->regs);
+	if (adapter->nofail_skb)
+		kfree_skb(adapter->nofail_skb);
 	kfree(adapter);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 620d80be6aac..f9f54b57b28c 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -566,13 +566,31 @@ static void t3_process_tid_release_list(struct work_struct *work)
 		spin_unlock_bh(&td->tid_release_lock);
 
 		skb = alloc_skb(sizeof(struct cpl_tid_release),
-				GFP_KERNEL | __GFP_NOFAIL);
+				GFP_KERNEL);
+		if (!skb)
+			skb = td->nofail_skb;
+		if (!skb) {
+			spin_lock_bh(&td->tid_release_lock);
+			p->ctx = (void *)td->tid_release_list;
+			td->tid_release_list = (struct t3c_tid_entry *)p;
+			break;
+		}
 		mk_tid_release(skb, p - td->tid_maps.tid_tab);
 		cxgb3_ofld_send(tdev, skb);
 		p->ctx = NULL;
+		if (skb == td->nofail_skb)
+			td->nofail_skb =
+				alloc_skb(sizeof(struct cpl_tid_release),
+					  GFP_KERNEL);
 		spin_lock_bh(&td->tid_release_lock);
 	}
+	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
 	spin_unlock_bh(&td->tid_release_lock);
+
+	if (!td->nofail_skb)
+		td->nofail_skb =
+			alloc_skb(sizeof(struct cpl_tid_release),
+				  GFP_KERNEL);
 }
 
 /* use ctx as a next pointer in the tid release list */
@@ -585,7 +603,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
 	p->ctx = (void *)td->tid_release_list;
 	p->client = NULL;
 	td->tid_release_list = p;
-	if (!p->ctx)
+	if (!p->ctx || td->release_list_incomplete)
 		schedule_work(&td->tid_release_task);
 	spin_unlock_bh(&td->tid_release_lock);
 }
@@ -1274,6 +1292,9 @@ int cxgb3_offload_activate(struct adapter *adapter)
 	if (list_empty(&adapter_list))
 		register_netevent_notifier(&nb);
 
+	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
+	t->release_list_incomplete = 0;
+
 	add_adapter(adapter);
 	return 0;
 
@@ -1298,6 +1319,8 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
 	T3C_DATA(tdev) = NULL;
 	t3_free_l2t(L2DATA(tdev));
 	L2DATA(tdev) = NULL;
+	if (t->nofail_skb)
+		kfree_skb(t->nofail_skb);
 	kfree(t);
 }
 
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index a8e8e5fcdf84..55945f422aec 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -191,6 +191,9 @@ struct t3c_data {
 	struct t3c_tid_entry *tid_release_list;
 	spinlock_t tid_release_lock;
 	struct work_struct tid_release_task;
+
+	struct sk_buff *nofail_skb;
+	unsigned int release_list_incomplete;
 };
 
 /*
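One subtlety in the cxgb3_offload.c hunks: the tid release worker cannot return an error to its caller, so when both the normal allocation and the reserve fail it puts the entry back on the list, records that the list was left incomplete, and relies on the next cxgb3_queue_tid_release() call to reschedule the work, which otherwise fires only on an empty-to-non-empty transition. A standalone sketch of that requeue-and-flag shape, under generic hypothetical names (rel_queue, release_worker, queue_release are not the driver's, and the nofail_skb fallback is omitted for brevity):

```c
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Generic stand-ins for the driver's t3c_tid_entry list. */
struct rel_entry {
	struct rel_entry *next;
	unsigned int tid;
};

struct rel_queue {
	struct rel_entry *head;
	spinlock_t lock;
	struct work_struct task;
	unsigned int incomplete;	/* worker stopped early on alloc failure */
};

/* Worker: drain the list; on allocation failure, requeue the current
 * entry, stop, and leave the incomplete flag set for the enqueue side. */
static void release_worker(struct work_struct *work)
{
	struct rel_queue *q = container_of(work, struct rel_queue, task);
	struct rel_entry *e;

	spin_lock_bh(&q->lock);
	while ((e = q->head) != NULL) {
		struct sk_buff *skb;

		q->head = e->next;
		spin_unlock_bh(&q->lock);

		skb = alloc_skb(sizeof(unsigned int), GFP_KERNEL);
		if (!skb) {
			/* Push the entry back; a later queue_release()
			 * call will reschedule this worker. */
			spin_lock_bh(&q->lock);
			e->next = q->head;
			q->head = e;
			break;
		}
		/* ... build and send the release message here ... */
		kfree_skb(skb);			/* stub for the real send */

		spin_lock_bh(&q->lock);
	}
	q->incomplete = (q->head != NULL);
	spin_unlock_bh(&q->lock);
}

/* Enqueue side: normally only an empty-to-non-empty transition schedules
 * the work, so a worker that gave up must be kicked explicitly. */
static void queue_release(struct rel_queue *q, struct rel_entry *e)
{
	spin_lock_bh(&q->lock);
	e->next = q->head;
	q->head = e;
	if (!e->next || q->incomplete)
		schedule_work(&q->task);
	spin_unlock_bh(&q->lock);
}
```

This is why the patch adds release_list_incomplete to struct t3c_data and extends the schedule_work() condition in cxgb3_queue_tid_release(): without the flag, entries requeued after an allocation failure could sit on a non-empty list with no pending work to drain them.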