author		Eric Dumazet <eric.dumazet@gmail.com>	2011-07-07 09:13:32 -0400
committer	David S. Miller <davem@davemloft.net>	2011-07-07 09:13:32 -0400
commit		68ac31918ec359a2bfb9f897bb62c2940652d2b8
tree		052c7ecb3ffd5ed3a5a60ff21e0441f88be87065
parent		3c878d4746ca12cc8022f9e02f055f175b847dbc
sky2: use GFP_KERNEL allocations at device setup
In process context, where sleeping is allowed, favor GFP_KERNEL allocations
over GFP_ATOMIC ones.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/sky2.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
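The pattern in the patch below is a general one: push a gfp_t argument down into the allocation helper so each caller passes the strictest flag its context allows, GFP_KERNEL where sleeping is fine (setup paths run in process context) and GFP_ATOMIC where it is not (the softirq receive path). A minimal sketch of that shape, using hypothetical names (my_buf_alloc, my_ring_fill, my_rx_refill) rather than the sky2 functions:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical helper: the caller decides which GFP flags are safe. */
static void *my_buf_alloc(size_t size, gfp_t gfp)
{
        return kmalloc(size, gfp);
}

/* Setup path: process context, sleeping allowed, so GFP_KERNEL. */
static int my_ring_fill(void **slots, int n, size_t size)
{
        int i;

        for (i = 0; i < n; i++) {
                slots[i] = my_buf_alloc(size, GFP_KERNEL);
                if (!slots[i])
                        return -ENOMEM;
        }
        return 0;
}

/* Hot receive path: softirq context, must not sleep, so GFP_ATOMIC. */
static void *my_rx_refill(size_t size)
{
        return my_buf_alloc(size, GFP_ATOMIC);
}

The sky2 change applies exactly this split: sky2_rx_alloc() gains a gfp parameter, the ring setup in sky2_alloc_rx_skbs() passes GFP_KERNEL, and the receive path in receive_new() keeps GFP_ATOMIC.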
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index e14b86e27dc9..c91513ed7847 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1362,13 +1362,14 @@ static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
  * Allocate an skb for receiving. If the MTU is large enough
  * make the skb non-linear with a fragment list of pages.
  */
-static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
+static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
 {
         struct sk_buff *skb;
         int i;
 
-        skb = netdev_alloc_skb(sky2->netdev,
-                               sky2->rx_data_size + sky2_rx_pad(sky2->hw));
+        skb = __netdev_alloc_skb(sky2->netdev,
+                                 sky2->rx_data_size + sky2_rx_pad(sky2->hw),
+                                 gfp);
         if (!skb)
                 goto nomem;
 
@@ -1386,7 +1387,7 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
         skb_reserve(skb, NET_IP_ALIGN);
 
         for (i = 0; i < sky2->rx_nfrags; i++) {
-                struct page *page = alloc_page(GFP_ATOMIC);
+                struct page *page = alloc_page(gfp);
 
                 if (!page)
                         goto free_partial;
@@ -1416,7 +1417,7 @@ static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
         for (i = 0; i < sky2->rx_pending; i++) {
                 struct rx_ring_info *re = sky2->rx_ring + i;
 
-                re->skb = sky2_rx_alloc(sky2);
+                re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
                 if (!re->skb)
                         return -ENOMEM;
 
@@ -2384,7 +2385,7 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
         struct rx_ring_info nre;
         unsigned hdr_space = sky2->rx_data_size;
 
-        nre.skb = sky2_rx_alloc(sky2);
+        nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
         if (unlikely(!nre.skb))
                 goto nobuf;
 