author	Divy Le Ray <divy@chelsio.com>	2007-04-17 14:06:30 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-04-19 15:01:16 -0400
commit	606fcd0b94f7531f52a9b07008a4461213cbcd27 (patch)
tree	3fafe8545cf86795b3db456663aaec8520f050fd
parent	895e1fc7226e6732bc77138955b6c7dfa279f57a (diff)
cxgb3 - Fix low memory conditions
Reuse the incoming skb when a clientless abort req is received.

The release of an RDMA connection's HW resources might be deferred in
low memory situations.  Ensure that no further activity is passed up
to the RDMA driver for these connections.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
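The reuse path hinges on skb_cloned(): when nobody else holds a reference to the
incoming CPL_ABORT_REQ_RSS buffer, it is trimmed down and recycled as the
CPL_ABORT_RPL reply; only a cloned skb forces a fresh allocation. A condensed
sketch of the helper this patch adds (cxgb3_get_cpl_reply_skb), shown here for
orientation only:

	/* Reuse the incoming skb for the reply when it has no other
	 * users, otherwise fall back to allocating a new buffer.
	 */
	if (likely(!skb_cloned(skb))) {
		__skb_trim(skb, len);	/* shrink to the reply size */
		skb_get(skb);		/* extra ref; the RX path still frees its copy */
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;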
-rw-r--r--	drivers/net/cxgb3/cxgb3_defs.h	5
-rw-r--r--	drivers/net/cxgb3/cxgb3_offload.c	69
2 files changed, 55 insertions(+), 19 deletions(-)
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index e14862b43d17..483a594210a7 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -67,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t,
 static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
 					       unsigned int tid)
 {
-	return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+	struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+	    &(t->tid_tab[tid]) : NULL;
+
+	return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
 }
 
 /*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 48649244673e..199e5066acf3 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -508,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
 
 	spin_lock_bh(&td->tid_release_lock);
 	p->ctx = (void *)td->tid_release_list;
+	p->client = NULL;
 	td->tid_release_list = p;
 	if (!p->ctx)
 		schedule_work(&td->tid_release_task);
@@ -623,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-	if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+	    t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
 		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
 								    t3c_tid->
@@ -642,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode] (dev, skb,
 							     t3c_tid->ctx);
@@ -660,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode]
 		    (dev, skb, t3c_tid->ctx);
@@ -689,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
 	}
 }
 
+/*
+ * Returns an sk_buff for a reply CPL message of size len.  If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated.  The input skb must be of size at least len.  Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+					       int gfp)
+{
+	if (likely(!skb_cloned(skb))) {
+		BUG_ON(skb->len < len);
+		__skb_trim(skb, len);
+		skb_get(skb);
+	} else {
+		skb = alloc_skb(len, gfp);
+		if (skb)
+			__skb_put(skb, len);
+	}
+	return skb;
+}
+
 static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
 {
 	union opcode_tid *p = cplhdr(skb);
@@ -696,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode]
 		    (dev, skb, t3c_tid->ctx);
 	} else {
 		struct cpl_abort_req_rss *req = cplhdr(skb);
 		struct cpl_abort_rpl *rpl;
+		struct sk_buff *reply_skb;
+		unsigned int tid = GET_TID(req);
+		u8 cmd = req->status;
+
+		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+			goto out;
 
-		struct sk_buff *skb =
-		    alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
-		if (!skb) {
+		reply_skb = cxgb3_get_cpl_reply_skb(skb,
+						    sizeof(struct
+							   cpl_abort_rpl),
+						    GFP_ATOMIC);
+
+		if (!reply_skb) {
 			printk("do_abort_req_rss: couldn't get skb!\n");
 			goto out;
 		}
-		skb->priority = CPL_PRIORITY_DATA;
-		__skb_put(skb, sizeof(struct cpl_abort_rpl));
-		rpl = cplhdr(skb);
+		reply_skb->priority = CPL_PRIORITY_DATA;
+		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+		rpl = cplhdr(reply_skb);
 		rpl->wr.wr_hi =
 		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
-		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
-		OPCODE_TID(rpl) =
-		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
-		rpl->cmd = req->status;
-		cxgb3_ofld_send(dev, skb);
+		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+		rpl->cmd = cmd;
+		cxgb3_ofld_send(dev, reply_skb);
 out:
 		return CPL_RET_BUF_DONE;
 	}
@@ -732,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
 		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
 		    (dev, skb, t3c_tid->ctx);
@@ -762,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[opcode]) {
 		return t3c_tid->client->handlers[opcode] (dev, skb,
 							  t3c_tid->ctx);
@@ -961,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 	for (tid = 0; tid < ti->ntids; tid++) {
 		te = lookup_tid(ti, tid);
 		BUG_ON(!te);
-		if (te->ctx && te->client && te->client->redirect) {
+		if (te && te->ctx && te->client && te->client->redirect) {
 			update_tcb = te->client->redirect(te->ctx, old, new, e);
 			if (update_tcb) {
 				l2t_hold(L2DATA(tdev), e);