about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2011-03-01 00:48:12 -0500
committerDavid S. Miller <davem@davemloft.net>2011-03-03 16:02:30 -0500
commit1829b086d175ba07a01ff6934fd51a59bc9be4ce (patch)
tree6370f458a59f9a3065ae12213885f76ce40c7da6
parent6b8a66ee919e40111e3d257b2c22b5773e34ead1 (diff)
benet: use GFP_KERNEL allocations when possible
Extend be_alloc_pages() with a gfp parameter, so that we use GFP_KERNEL allocations instead of GFP_ATOMIC when not running in softirq context.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Ajit Khaparde <ajit.khaparde@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/benet/be_main.c18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 0bdccb10aac5..ef66dc61e6ea 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1169,20 +1169,20 @@ static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1169 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; 1169 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1170} 1170}
1171 1171
1172static inline struct page *be_alloc_pages(u32 size) 1172static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1173{ 1173{
1174 gfp_t alloc_flags = GFP_ATOMIC;
1175 u32 order = get_order(size); 1174 u32 order = get_order(size);
1175
1176 if (order > 0) 1176 if (order > 0)
1177 alloc_flags |= __GFP_COMP; 1177 gfp |= __GFP_COMP;
1178 return alloc_pages(alloc_flags, order); 1178 return alloc_pages(gfp, order);
1179} 1179}
1180 1180
1181/* 1181/*
1182 * Allocate a page, split it to fragments of size rx_frag_size and post as 1182 * Allocate a page, split it to fragments of size rx_frag_size and post as
1183 * receive buffers to BE 1183 * receive buffers to BE
1184 */ 1184 */
1185static void be_post_rx_frags(struct be_rx_obj *rxo) 1185static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1186{ 1186{
1187 struct be_adapter *adapter = rxo->adapter; 1187 struct be_adapter *adapter = rxo->adapter;
1188 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; 1188 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1196,7 +1196,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
1196 page_info = &rxo->page_info_tbl[rxq->head]; 1196 page_info = &rxo->page_info_tbl[rxq->head];
1197 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { 1197 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1198 if (!pagep) { 1198 if (!pagep) {
1199 pagep = be_alloc_pages(adapter->big_page_size); 1199 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1200 if (unlikely(!pagep)) { 1200 if (unlikely(!pagep)) {
1201 rxo->stats.rx_post_fail++; 1201 rxo->stats.rx_post_fail++;
1202 break; 1202 break;
@@ -1753,7 +1753,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1753 1753
1754 /* Refill the queue */ 1754 /* Refill the queue */
1755 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) 1755 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1756 be_post_rx_frags(rxo); 1756 be_post_rx_frags(rxo, GFP_ATOMIC);
1757 1757
1758 /* All consumed */ 1758 /* All consumed */
1759 if (work_done < budget) { 1759 if (work_done < budget) {
@@ -1890,7 +1890,7 @@ static void be_worker(struct work_struct *work)
1890 1890
1891 if (rxo->rx_post_starved) { 1891 if (rxo->rx_post_starved) {
1892 rxo->rx_post_starved = false; 1892 rxo->rx_post_starved = false;
1893 be_post_rx_frags(rxo); 1893 be_post_rx_frags(rxo, GFP_KERNEL);
1894 } 1894 }
1895 } 1895 }
1896 if (!adapter->ue_detected && !lancer_chip(adapter)) 1896 if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2138,7 +2138,7 @@ static int be_open(struct net_device *netdev)
2138 u16 link_speed; 2138 u16 link_speed;
2139 2139
2140 for_all_rx_queues(adapter, rxo, i) { 2140 for_all_rx_queues(adapter, rxo, i) {
2141 be_post_rx_frags(rxo); 2141 be_post_rx_frags(rxo, GFP_KERNEL);
2142 napi_enable(&rxo->rx_eq.napi); 2142 napi_enable(&rxo->rx_eq.napi);
2143 } 2143 }
2144 napi_enable(&tx_eq->napi); 2144 napi_enable(&tx_eq->napi);