Diffstat (limited to 'net')
-rw-r--r--  net/core/link_watch.c  |  9
-rw-r--r--  net/sunrpc/cache.c     |  4
-rw-r--r--  net/sunrpc/rpc_pipe.c  |  3
-rw-r--r--  net/sunrpc/xprtsock.c  |  6
4 files changed, 11 insertions, 11 deletions
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..f2ed09e25dfd 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
 static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
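
The link_watch.c hunks above show the basic shape of the conversion: the work item is declared as delayed work, and the old two-way branch between schedule_work() and schedule_delayed_work() collapses into a single schedule_delayed_work() call with a clamped delay. Below is a minimal sketch of that pattern, using the three-argument DECLARE_DELAYED_WORK() form that appears in this diff (later kernels drop the data argument); every identifier in the sketch is an illustrative stand-in, not a name from the kernel tree.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static unsigned long my_nextevent;	/* next time the handler should run */

static void my_event(void *dummy)
{
	/* work handler: runs in process context off the shared workqueue */
}

static DECLARE_DELAYED_WORK(my_work, my_event, NULL);

static void fire_my_event(void)
{
	unsigned long delay = my_nextevent - jiffies;

	/* a wrapped or oversized delta means "run as soon as possible" */
	if (delay > HZ)
		delay = 0;
	schedule_delayed_work(&my_work, delay);
}
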
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d5725cb1491e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -285,7 +285,7 @@ static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
 static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
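
In cache.c the start-up kick keeps its timing: schedule_delayed_work() with a delay of 0 queues the delayed work for the workqueue's next run, which is effectively what schedule_work() did for the old work_struct, so the cache cleaner still starts immediately after registration.
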
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..97be3f7fed44 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				rpc_timeout_upcall_queue, rpci);
 		rpci->ops = NULL;
 	}
 }
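
rpc_pipe.c cannot use a static declaration because its work item lives inside a per-inode structure that is set up by the slab constructor, so it switches to the runtime initializer INIT_DELAYED_WORK() instead. A hedged sketch of that embedded-member pattern follows; struct my_pipe and the function names are hypothetical, and only the delayed_work usage mirrors the hunk above.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* illustrative container object, not the real rpc_inode */
struct my_pipe {
	struct delayed_work queue_timeout;
	/* ... other per-object state ... */
};

static void my_timeout_fn(void *data)
{
	struct my_pipe *p = data;

	pr_debug("upcall queue timed out for pipe %p\n", p);
}

static void my_pipe_init(struct my_pipe *p)
{
	/* runtime counterpart of the static DECLARE_DELAYED_WORK() above */
	INIT_DELAYED_WORK(&p->queue_timeout, my_timeout_fn, p);
}
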
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25d..3c7532cd009e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task)
 		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
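
In xprtsock.c the transport's connect_worker becomes delayed work as well (the matching struct rpc_xprt field change lives outside net/ and is not shown in this diffstat), so xs_connect() can kick an immediate connect with a delay of 0 and, as the surrounding context hints, synchronous callers may still wait for it via flush_scheduled_work(). The following is a hedged sketch of that immediate-connect branch only; my_xprt and the async flag are stand-ins for the real rpc_xprt/rpc_task plumbing.

#include <linux/workqueue.h>

struct my_xprt {
	struct delayed_work connect_worker;	/* set up with INIT_DELAYED_WORK() */
};

static void my_connect(struct my_xprt *xprt, int async)
{
	/* queue the connect worker with no extra delay */
	schedule_delayed_work(&xprt->connect_worker, 0);

	/* flush_scheduled_work() can sleep, so only synchronous callers wait */
	if (!async)
		flush_scheduled_work();
}
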