author     Stephen Hemminger <shemminger@osdl.org>       2006-11-14 13:43:58 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-12-03 00:22:32 -0500
commit     a1bcfacd0577ff477e934731d4ceb3d26eab947d
tree       f5a7b2e9933a72135a49a1348b7839e8afdb020e
parent     d23ca15a21804631d8f787a0cc5646df81b9c2ea
netpoll: private skb pool (rev3)
It was a dark and stormy night when Steve first saw the
netpoll beast. The beast was odd and misshapen, but not
extremely ugly.
"Let me take off one of your warts" he said. This wart
is where you tried to make an skb list yourself. If the
beast had ever run out of memory, he would have stupefied
himself unnecessarily.
The first try was painful, so he tried again till the bleeding
stopped. And again, and again...
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
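
The wart in question: netpoll kept its emergency skb reserve on a hand-rolled
singly linked list guarded by its own spinlock and counter (skbs, nr_skbs,
skb_list_lock). The patch replaces that with the kernel's stock
struct sk_buff_head, which bundles the list head, a qlen count and a spinlock
into one object. A minimal sketch of the pattern, assuming kernel context;
pool, POOL_MAX, the 256-byte size and the helper names are illustrative, not
from the patch:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define POOL_MAX 32			/* illustrative cap, stands in for MAX_SKBS */

static struct sk_buff_head pool;	/* list head + qlen + lock in one struct */

static void pool_init(void)
{
	skb_queue_head_init(&pool);	/* initializes the embedded lock, qlen = 0 */
}

/* Top up the pool from atomic context; one lock round-trip covers the loop. */
static void pool_refill(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&pool.lock, flags);
	while (pool.qlen < POOL_MAX) {
		skb = alloc_skb(256, GFP_ATOMIC);	/* 256: illustrative size */
		if (!skb)
			break;
		__skb_queue_tail(&pool, skb);	/* unlocked variant: lock already held */
	}
	spin_unlock_irqrestore(&pool.lock, flags);
}

/* Take one skb out; skb_dequeue() acquires pool.lock itself. */
static struct sk_buff *pool_get(void)
{
	return skb_dequeue(&pool);	/* NULL when the pool is empty */
}

With the stock queue, the open-coded list splicing (and the chance of getting
it wrong under memory pressure) simply disappears.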
 net/core/netpoll.c | 53 +++++++++++++++++++++--------------------------------
 1 file changed, 21 insertions(+), 32 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6589adb14cbf..4de62f1f4134 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -36,9 +36,7 @@
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
 #define MAX_RETRIES 20000
 
-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
+static struct sk_buff_head skb_pool;
 
 static DEFINE_SPINLOCK(queue_lock);
 static int queue_depth;
@@ -190,17 +188,15 @@ static void refill_skbs(void)
 	struct sk_buff *skb;
 	unsigned long flags;
 
-	spin_lock_irqsave(&skb_list_lock, flags);
-	while (nr_skbs < MAX_SKBS) {
+	spin_lock_irqsave(&skb_pool.lock, flags);
+	while (skb_pool.qlen < MAX_SKBS) {
 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
 		if (!skb)
 			break;
 
-		skb->next = skbs;
-		skbs = skb;
-		nr_skbs++;
+		__skb_queue_tail(&skb_pool, skb);
 	}
-	spin_unlock_irqrestore(&skb_list_lock, flags);
+	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
 static void zap_completion_queue(void)
@@ -229,38 +225,25 @@ static void zap_completion_queue(void)
 	put_cpu_var(softnet_data);
 }
 
-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
-	int once = 1, count = 0;
-	unsigned long flags;
-	struct sk_buff *skb = NULL;
+	int count = 0;
+	struct sk_buff *skb;
 
 	zap_completion_queue();
+	refill_skbs();
 repeat:
-	if (nr_skbs < MAX_SKBS)
-		refill_skbs();
 
 	skb = alloc_skb(len, GFP_ATOMIC);
-
-	if (!skb) {
-		spin_lock_irqsave(&skb_list_lock, flags);
-		skb = skbs;
-		if (skb) {
-			skbs = skb->next;
-			skb->next = NULL;
-			nr_skbs--;
-		}
-		spin_unlock_irqrestore(&skb_list_lock, flags);
-	}
+	if (!skb)
+		skb = skb_dequeue(&skb_pool);
 
 	if(!skb) {
-		count++;
-		if (once && (count == 1000000)) {
-			printk("out of netpoll skbs!\n");
-			once = 0;
+		if (++count < 10) {
+			netpoll_poll(np);
+			goto repeat;
 		}
-		netpoll_poll(np);
-		goto repeat;
+		return NULL;
 	}
 
 	atomic_set(&skb->users, 1);
@@ -770,6 +753,12 @@ int netpoll_setup(struct netpoll *np)
 	return -1;
 }
 
+static int __init netpoll_init(void) {
+	skb_queue_head_init(&skb_pool);
+	return 0;
+}
+core_initcall(netpoll_init);
+
 void netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
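
Two points worth calling out in the result (a reading of the diff above, not
text from the patch): refill_skbs() takes skb_pool.lock once around the whole
top-up loop and therefore uses the unlocked __skb_queue_tail(), while
find_skb() calls skb_dequeue(), which takes that same embedded lock
internally; both paths serialize on the single lock sk_buff_head already
carries, which is what makes the private skb_list_lock and nr_skbs removable.
Failure behaviour also tightens: the old loop could spin indefinitely
(printing "out of netpoll skbs!" once after a million iterations), whereas
the new find_skb() retries through netpoll_poll() fewer than ten times and
then returns NULL to the caller.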