author     Eric Dumazet <edumazet@google.com>      2012-12-10 07:32:03 -0500
committer  David S. Miller <davem@davemloft.net>   2012-12-11 12:49:53 -0500
commit     f8e8f97c11d5ff3cc47d85b97c7c35e443dcf490 (patch)
tree       dc56e7e2030cfd0fd0e8b876f86d18a08514a38c /include
parent     d46d132cc0212ef08c22b9179dfa5fe21d07d253 (diff)
net: fix a race in gro_cell_poll()
Dmitry Kravkov reported packet drops for GRE packets since GRO support
was added.
There is a race in gro_cell_poll() because we call napi_complete()
without any synchronization with a concurrent gro_cells_receive().

Once the bug was triggered, we kept queuing packets but no longer
scheduled the NAPI poll, so the queued packets were never processed.
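
To see the lost wakeup concretely, below is a user-space analogue of the
pre-patch logic. This is only a sketch under simplified assumptions:
pthreads stand in for softirq/BH execution, an integer qlen for the
napi_skbs queue, and a running flag for NAPI_STATE_SCHED; none of these
names come from the patch.

/* race_demo.c - user-space analogue of the pre-patch lost wakeup.
 * Build: gcc -O2 -pthread race_demo.c -o race_demo
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static int qlen;            /* "queued packets", protected by qlock */
static atomic_bool running; /* stands in for NAPI_STATE_SCHED */

/* mirrors gro_cells_receive(): enqueue, schedule on the 0 -> 1 edge */
static void *producer(void *arg)
{
        for (int i = 0; i < 1000000; i++) {
                pthread_mutex_lock(&qlock);
                if (++qlen == 1)
                        atomic_store(&running, true); /* "napi_schedule()" */
                pthread_mutex_unlock(&qlock);
        }
        return NULL;
}

/* mirrors the pre-patch gro_cell_poll(): locked dequeues, unlocked completion */
static void *poller(void *arg)
{
        for (;;) {
                if (!atomic_load(&running)) {
                        sched_yield(); /* "NAPI not scheduled": stay idle */
                        continue;
                }
                for (;;) {
                        pthread_mutex_lock(&qlock);
                        bool empty = (qlen == 0);
                        if (!empty)
                                qlen--; /* "skb_dequeue() + napi_gro_receive()" */
                        pthread_mutex_unlock(&qlock);
                        if (empty)
                                break;
                }
                /* THE RACE: completing without the lock.  A producer that
                 * enqueued after our emptiness check saw running == true and
                 * skipped the wakeup; this store strands its packet. */
                atomic_store(&running, false);
        }
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, poller, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        sleep(1); /* give the poller time to drain, if it is still awake */

        pthread_mutex_lock(&qlock);
        if (qlen > 0 && !atomic_load(&running))
                printf("stranded: %d packet(s) queued, poller idle\n", qlen);
        else
                printf("race not hit on this run\n");
        return 0; /* process exit reaps the spinning poller thread */
}

Once a packet is stranded, later enqueues raise qlen to 2, 3, ... and the
0 -> 1 schedule condition never fires again, matching the reported stall.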
We can fix this issue by using the spinlock that protects the napi_skbs
queue, as we have to hold it to perform the skb dequeue anyway.

As we now open-code skb_dequeue(), we no longer need to mask IRQs:
both producer and consumer run under BH context.
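
For readability, here is gro_cell_poll() as it reads with the patch
applied, reassembled from the diff below; the two comments are editorial
annotations, not part of the patch:

static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
        struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
        struct sk_buff *skb;
        int work_done = 0;

        spin_lock(&cell->napi_skbs.lock);
        while (work_done < budget) {
                skb = __skb_dequeue(&cell->napi_skbs);
                if (!skb)
                        break;
                /* drop the lock around the expensive GRO processing */
                spin_unlock(&cell->napi_skbs.lock);
                napi_gro_receive(napi, skb);
                work_done++;
                spin_lock(&cell->napi_skbs.lock);
        }

        /* the emptiness check and napi_complete() are now atomic with
         * respect to gro_cells_receive(), closing the race window */
        if (work_done < budget)
                napi_complete(napi);
        spin_unlock(&cell->napi_skbs.lock);
        return work_done;
}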
Bug added in commit c9e6bc644e ("net: add gro_cells infrastructure")
Reported-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--   include/net/gro_cells.h | 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 4fd8a4b4b7ee..e5062c955ea6 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -17,7 +17,6 @@ struct gro_cells {
 
 static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
-        unsigned long flags;
         struct gro_cell *cell = gcells->cells;
         struct net_device *dev = skb->dev;
 
@@ -35,32 +34,37 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
                 return;
         }
 
-        spin_lock_irqsave(&cell->napi_skbs.lock, flags);
+        /* We run in BH context */
+        spin_lock(&cell->napi_skbs.lock);
 
         __skb_queue_tail(&cell->napi_skbs, skb);
         if (skb_queue_len(&cell->napi_skbs) == 1)
                 napi_schedule(&cell->napi);
 
-        spin_unlock_irqrestore(&cell->napi_skbs.lock, flags);
+        spin_unlock(&cell->napi_skbs.lock);
 }
 
+/* called unser BH context */
 static inline int gro_cell_poll(struct napi_struct *napi, int budget)
 {
         struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
         struct sk_buff *skb;
         int work_done = 0;
 
+        spin_lock(&cell->napi_skbs.lock);
         while (work_done < budget) {
-                skb = skb_dequeue(&cell->napi_skbs);
+                skb = __skb_dequeue(&cell->napi_skbs);
                 if (!skb)
                         break;
-
+                spin_unlock(&cell->napi_skbs.lock);
                 napi_gro_receive(napi, skb);
                 work_done++;
+                spin_lock(&cell->napi_skbs.lock);
         }
 
         if (work_done < budget)
                 napi_complete(napi);
+        spin_unlock(&cell->napi_skbs.lock);
         return work_done;
 }
 