author     Yunsheng Lin <linyunsheng@huawei.com>    2019-06-27 21:13:19 -0400
committer  David S. Miller <davem@davemloft.net>    2019-07-01 22:02:47 -0400
commit     27ba4059e06b3bbd38a7d944fd5a78cdf47534f4
tree       717af81b11215303a6c0a7d5fbfeee53284e1825
parent     0d0bcacc54e65540b8a3d680c130b741010e23a3
net: link_watch: prevent starvation when processing linkwatch wq
When a user has configured a large number of virtual netdevs, such
as 4K VLANs, a carrier on/off operation on the real netdev will
also cause its virtual netdevs' link states to be processed in
linkwatch. Currently the processing is done in a work queue, which
may cause an rtnl lock starvation problem and a worker starvation
problem for other work queues, such as the irqfd_inject wq.
This patch releases the CPU once the link watch worker has
processed a fixed number of netdevs' link watch events, and
schedules the work queue again when link watch events remain.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
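
For illustration, here is a minimal user-space sketch of the budget-and-reschedule pattern the patch applies. This is not kernel code; names such as run_queue, pending, and BUDGET are hypothetical stand-ins, and only the control flow mirrors the patch:

	/*
	 * Budget-and-reschedule sketch: drain at most BUDGET items per
	 * pass, then return and let the caller re-queue the work if
	 * anything is left, instead of holding the CPU until empty.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define BUDGET 100

	static int pending = 250;          /* stand-in for lweventlist */

	static bool run_queue(void)
	{
		int budget = BUDGET;

		while (pending > 0 && budget > 0) {
			pending--;         /* stand-in for linkwatch_do_dev() */
			budget--;
		}
		return pending > 0;        /* caller reschedules if true */
	}

	int main(void)
	{
		int passes = 0;

		while (run_queue())        /* stand-in for re-queuing the work */
			passes++;
		printf("drained in %d passes\n", passes + 1);
		return 0;
	}

With 250 pending items and a budget of 100, the queue drains in three passes, and between passes other work (in the kernel case, other items on the same workqueue) gets a chance to run.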
 net/core/link_watch.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 04fdc9535772..f153e0601838 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -163,9 +163,16 @@ static void linkwatch_do_dev(struct net_device *dev)
 
 static void __linkwatch_run_queue(int urgent_only)
 {
+#define MAX_DO_DEV_PER_LOOP	100
+
+	int do_dev = MAX_DO_DEV_PER_LOOP;
 	struct net_device *dev;
 	LIST_HEAD(wrk);
 
+	/* Give urgent case more budget */
+	if (urgent_only)
+		do_dev += MAX_DO_DEV_PER_LOOP;
+
 	/*
 	 * Limit the number of linkwatch events to one
 	 * per second so that a runaway driver does not
@@ -184,7 +191,7 @@ static void __linkwatch_run_queue(int urgent_only)
 	spin_lock_irq(&lweventlist_lock);
 	list_splice_init(&lweventlist, &wrk);
 
-	while (!list_empty(&wrk)) {
+	while (!list_empty(&wrk) && do_dev > 0) {
 
 		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
 		list_del_init(&dev->link_watch_list);
@@ -195,9 +202,13 @@ static void __linkwatch_run_queue(int urgent_only)
 		}
 		spin_unlock_irq(&lweventlist_lock);
 		linkwatch_do_dev(dev);
+		do_dev--;
 		spin_lock_irq(&lweventlist_lock);
 	}
 
+	/* Add the remaining work back to lweventlist */
+	list_splice_init(&wrk, &lweventlist);
+
 	if (!list_empty(&lweventlist))
 		linkwatch_schedule_work(0);
 	spin_unlock_irq(&lweventlist_lock);
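
A note on the design: when the budget runs out, the unprocessed entries still on wrk are spliced back onto lweventlist while lweventlist_lock is held, so no event is lost, and the existing !list_empty(&lweventlist) check then re-arms the work item so the remaining events are handled on a later pass. The urgent path gets twice the budget (2 * MAX_DO_DEV_PER_LOOP), making it less likely that time-critical events are deferred.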