diff options
author | Michal Hocko <mhocko@suse.cz> | 2015-04-14 16:24:33 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-04-16 12:03:01 -0400 |
commit | f72f116a2a70f616ea44f86775ae6404c84ea8ef (patch) | |
tree | a225b58667b2963b0e682095a36122658679190c | |
parent | 6b9107d6a10b69cc0a675447bde9383dff8a1d4f (diff) |
cxgb4: drop __GFP_NOFAIL allocation
set_filter_wr requests a __GFP_NOFAIL allocation even though it can obviously
return -ENOMEM without any problems (t4_l2t_set_switching does that
already). So the non-failing requirement is too strong without any
obvious reason. Drop __GFP_NOFAIL and reorganize the code to make the
failure paths easier to follow.
The same applies to _c4iw_write_mem_dma_aligned which uses __GFP_NOFAIL
and then checks the return value and returns -ENOMEM on failure. This
doesn't make any sense whatsoever: either the allocation cannot fail, or
it can.
del_filter_wr seems to be safe as well because the filter entry is not
marked as pending and the return value is propagated up the stack up to
c4iw_destroy_listen.
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Hariprasad S <hariprasad@chelsio.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/infiniband/hw/cxgb4/mem.c | 2 | ||||
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 15 |
2 files changed, 13 insertions, 4 deletions
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 6791fd16272c..3ef0cf9f5c44 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -73,7 +73,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, | |||
73 | c4iw_init_wr_wait(&wr_wait); | 73 | c4iw_init_wr_wait(&wr_wait); |
74 | wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16); | 74 | wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16); |
75 | 75 | ||
76 | skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); | 76 | skb = alloc_skb(wr_len, GFP_KERNEL); |
77 | if (!skb) | 77 | if (!skb) |
78 | return -ENOMEM; | 78 | return -ENOMEM; |
79 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); | 79 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6de054404156..803d91beec6f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -1140,6 +1140,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx) | |||
1140 | struct fw_filter_wr *fwr; | 1140 | struct fw_filter_wr *fwr; |
1141 | unsigned int ftid; | 1141 | unsigned int ftid; |
1142 | 1142 | ||
1143 | skb = alloc_skb(sizeof(*fwr), GFP_KERNEL); | ||
1144 | if (!skb) | ||
1145 | return -ENOMEM; | ||
1146 | |||
1143 | /* If the new filter requires loopback Destination MAC and/or VLAN | 1147 | /* If the new filter requires loopback Destination MAC and/or VLAN |
1144 | * rewriting then we need to allocate a Layer 2 Table (L2T) entry for | 1148 | * rewriting then we need to allocate a Layer 2 Table (L2T) entry for |
1145 | * the filter. | 1149 | * the filter. |
@@ -1147,19 +1151,21 @@ static int set_filter_wr(struct adapter *adapter, int fidx) | |||
1147 | if (f->fs.newdmac || f->fs.newvlan) { | 1151 | if (f->fs.newdmac || f->fs.newvlan) { |
1148 | /* allocate L2T entry for new filter */ | 1152 | /* allocate L2T entry for new filter */ |
1149 | f->l2t = t4_l2t_alloc_switching(adapter->l2t); | 1153 | f->l2t = t4_l2t_alloc_switching(adapter->l2t); |
1150 | if (f->l2t == NULL) | 1154 | if (f->l2t == NULL) { |
1155 | kfree_skb(skb); | ||
1151 | return -EAGAIN; | 1156 | return -EAGAIN; |
1157 | } | ||
1152 | if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, | 1158 | if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, |
1153 | f->fs.eport, f->fs.dmac)) { | 1159 | f->fs.eport, f->fs.dmac)) { |
1154 | cxgb4_l2t_release(f->l2t); | 1160 | cxgb4_l2t_release(f->l2t); |
1155 | f->l2t = NULL; | 1161 | f->l2t = NULL; |
1162 | kfree_skb(skb); | ||
1156 | return -ENOMEM; | 1163 | return -ENOMEM; |
1157 | } | 1164 | } |
1158 | } | 1165 | } |
1159 | 1166 | ||
1160 | ftid = adapter->tids.ftid_base + fidx; | 1167 | ftid = adapter->tids.ftid_base + fidx; |
1161 | 1168 | ||
1162 | skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL); | ||
1163 | fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); | 1169 | fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); |
1164 | memset(fwr, 0, sizeof(*fwr)); | 1170 | memset(fwr, 0, sizeof(*fwr)); |
1165 | 1171 | ||
@@ -1257,7 +1263,10 @@ static int del_filter_wr(struct adapter *adapter, int fidx) | |||
1257 | len = sizeof(*fwr); | 1263 | len = sizeof(*fwr); |
1258 | ftid = adapter->tids.ftid_base + fidx; | 1264 | ftid = adapter->tids.ftid_base + fidx; |
1259 | 1265 | ||
1260 | skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); | 1266 | skb = alloc_skb(len, GFP_KERNEL); |
1267 | if (!skb) | ||
1268 | return -ENOMEM; | ||
1269 | |||
1261 | fwr = (struct fw_filter_wr *)__skb_put(skb, len); | 1270 | fwr = (struct fw_filter_wr *)__skb_put(skb, len); |
1262 | t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id); | 1271 | t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id); |
1263 | 1272 | ||