author     David Howells <dhowells@redhat.com>    2006-11-22 09:54:01 -0500
committer  David Howells <dhowells@redhat.com>    2006-11-22 09:54:01 -0500
commit     52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c
tree       5849b4e3c17daa70a7e81cfdeaddac9ac8a0e953 /net
parent     0f9005a6f7a82f4aacbd72f7b92322a8ca1c3f97
WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them
into a separate structure (delayed_work), which incorporates a work_struct and
the timer_list removed from work_struct.
The work_struct struct is huge, and this limits its usefulness. On a 64-bit
architecture it's nearly 100 bytes in size. This reduces that by half for the
non-delayable type of event.
Signed-Off-By: David Howells <dhowells@redhat.com>
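To make the split concrete, here is a minimal sketch of the two structures after this change (simplified; the authoritative definitions live in include/linux/workqueue.h of this tree). The timer that only delayable items need moves out of work_struct and into the new delayed_work wrapper:

```c
#include <linux/list.h>
#include <linux/timer.h>

/* Simplified sketch, not the verbatim kernel definitions. */
struct work_struct {
	unsigned long pending;		/* queueing state bits */
	struct list_head entry;		/* node on the workqueue's list */
	void (*func)(void *);		/* handler (old-style, takes data) */
	void *data;			/* argument handed to func */
	void *wq_data;			/* owning workqueue, set internally */
	/* the embedded struct timer_list is gone */
};

struct delayed_work {
	struct work_struct work;	/* the plain, non-delayable core */
	struct timer_list timer;	/* fires later and queues 'work' */
};
```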
Diffstat (limited to 'net')
-rw-r--r--  net/core/link_watch.c  | 9
-rw-r--r--  net/sunrpc/cache.c     | 4
-rw-r--r--  net/sunrpc/rpc_pipe.c  | 3
-rw-r--r--  net/sunrpc/xprtsock.c  | 6
4 files changed, 11 insertions(+), 11 deletions(-)
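Every hunk below is the same mechanical conversion: a delayable user switches from work_struct, DECLARE_WORK()/INIT_WORK() and schedule_work() to delayed_work, DECLARE_DELAYED_WORK()/INIT_DELAYED_WORK() and schedule_delayed_work(), passing a delay of 0 jiffies where the work should still run as soon as possible. A hedged sketch of the before/after usage (my_handler, my_work, my_dwork and kick_examples are hypothetical names, not code from this patch; the handler uses the old-style void * signature current at the time):

```c
#include <linux/workqueue.h>

/* Hypothetical handler with the old-style void * argument. */
static void my_handler(void *data)
{
	/* ... do the actual work ... */
}

/* Non-delayable item: still a plain work_struct, now half the size. */
static DECLARE_WORK(my_work, my_handler, NULL);

/* Delayable item: now a delayed_work, which carries the timer. */
static DECLARE_DELAYED_WORK(my_dwork, my_handler, NULL);

static void kick_examples(void)
{
	/* Non-delayable path, unchanged by this patch. */
	schedule_work(&my_work);

	/* Delayable path: always schedule_delayed_work(), even for
	 * "run as soon as possible" (a delay of 0 jiffies). */
	schedule_delayed_work(&my_dwork, 0);
	schedule_delayed_work(&my_dwork, HZ);
}
```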
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..f2ed09e25dfd 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
 static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d5725cb1491e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -285,7 +285,7 @@ static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
 static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..97be3f7fed44 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				  rpc_timeout_upcall_queue, rpci);
 		rpci->ops = NULL;
 	}
 }
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25d..3c7532cd009e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task)
 		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;