author     Stanislaw Gruszka <sgruszka@redhat.com>    2010-07-15 18:55:40 -0400
committer  David S. Miller <davem@davemloft.net>      2010-07-18 17:42:48 -0400
commit     a2df00aa33f741096e977456573ebb08eece0b6f
tree       dc973da20af6bcd0637394cecc59ab1bc2064f0a /drivers/net/bnx2.c
parent     b97d13a53d63c7db1d05d54298c7a12f86c4fbad
bnx2: allocate with GFP_KERNEL flag on RX path init
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Acked-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--  drivers/net/bnx2.c  17
1 file changed, 9 insertions, 8 deletions
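The patch follows a common kernel pattern: instead of hard-coding GFP_ATOMIC in the RX buffer helpers, a gfp_t argument is threaded through them, so the ring-init path (process context, may sleep) can allocate with GFP_KERNEL while the NAPI receive path keeps GFP_ATOMIC. The following is a minimal, self-contained userspace sketch of that flag-plumbing idea only; it is not the bnx2 code, and every name in it (alloc_flags_t, ring_alloc_buf, fill_ring, refill_one) is a hypothetical stand-in rather than a kernel API.

/*
 * Userspace sketch of passing allocation flags down to a shared helper.
 * All identifiers below are illustrative; plain malloc() stands in for
 * alloc_page()/__netdev_alloc_skb().
 */
#include <stdio.h>
#include <stdlib.h>

typedef enum {
	ALLOC_MAY_SLEEP,   /* analogue of GFP_KERNEL: slow path, may block   */
	ALLOC_ATOMIC       /* analogue of GFP_ATOMIC: fast path, never block */
} alloc_flags_t;

/* Helper takes the flags from its caller instead of hard-coding them. */
static void *ring_alloc_buf(size_t size, alloc_flags_t flags)
{
	(void)flags;          /* a real allocator would honour the flags */
	return malloc(size);
}

/* Init path: process context, so a blocking allocation would be fine. */
static int fill_ring(void **ring, int entries, size_t buf_size)
{
	for (int i = 0; i < entries; i++) {
		ring[i] = ring_alloc_buf(buf_size, ALLOC_MAY_SLEEP);
		if (!ring[i]) {
			fprintf(stderr, "filled %d/%d entries only\n", i, entries);
			return -1;
		}
	}
	return 0;
}

/* Fast path: interrupt-like context, must not block. */
static int refill_one(void **ring, int index, size_t buf_size)
{
	ring[index] = ring_alloc_buf(buf_size, ALLOC_ATOMIC);
	return ring[index] ? 0 : -1;
}

int main(void)
{
	void *ring[8] = { 0 };

	if (fill_ring(ring, 8, 2048) == 0) {
		free(ring[0]);              /* entry 0 was consumed and handed up */
		refill_one(ring, 0, 2048);  /* replenish it without sleeping */
	}

	for (int i = 0; i < 8; i++)
		free(ring[i]);
	return 0;
}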
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a203f39e2b8c..a7df539f29d3 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2664,13 +2664,13 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
 }
 
 static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
 	dma_addr_t mapping;
 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 	struct rx_bd *rxbd =
 		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
-	struct page *page = alloc_page(GFP_ATOMIC);
+	struct page *page = alloc_page(gfp);
 
 	if (!page)
 		return -ENOMEM;
@@ -2705,7 +2705,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
@@ -2713,7 +2713,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
 	unsigned long align;
 
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
 	if (skb == NULL) {
 		return -ENOMEM;
 	}
@@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 	int err;
 	u16 prod = ring_idx & 0xffff;
 
-	err = bnx2_alloc_rx_skb(bp, rxr, prod);
+	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
 	if (unlikely(err)) {
 		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
 		if (hdr_len) {
@@ -3039,7 +3039,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 			rx_pg->page = NULL;
 
 			err = bnx2_alloc_rx_page(bp, rxr,
-						 RX_PG_RING_IDX(pg_prod));
+						 RX_PG_RING_IDX(pg_prod),
+						 GFP_ATOMIC);
 			if (unlikely(err)) {
 				rxr->rx_pg_cons = pg_cons;
 				rxr->rx_pg_prod = pg_prod;
@@ -5179,7 +5180,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_pg_prod;
 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
-		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
 				    ring_num, i, bp->rx_pg_ring_size);
 			break;
@@ -5191,7 +5192,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
 				    ring_num, i, bp->rx_ring_size);
 			break;