diff options
author | Tejun Heo <htejun@gmail.com> | 2006-12-06 23:36:01 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:39:32 -0500 |
commit | 593be07ae8f6f4a1b1b98813fabb155328f8bc0c (patch) | |
tree | 570686c676986d79ff0868f88c499a8b8fc1d3b4 /fs | |
parent | e59e2ae2c29700117a54e85c106017c24837119f (diff) |
[PATCH] file: kill unnecessary timer in fdtable_defer
free_fdtable_rcu() schedules a timer to reschedule fddef->wq if
schedule_work() on it returns 0. However, schedule_work() guarantees that
the target work is executed at least once after the scheduling regardless
of its return value. 0 return simply means that the work was already
pending and thus no further action was required.
Another problem is that it used the constant '5' as the @expires argument to
mod_timer(), which expects an absolute jiffies value.
Kill unnecessary fddef->timer.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/file.c | 29 |
1 files changed, 2 insertions, 27 deletions
@@ -21,7 +21,6 @@ | |||
21 | struct fdtable_defer { | 21 | struct fdtable_defer { |
22 | spinlock_t lock; | 22 | spinlock_t lock; |
23 | struct work_struct wq; | 23 | struct work_struct wq; |
24 | struct timer_list timer; | ||
25 | struct fdtable *next; | 24 | struct fdtable *next; |
26 | }; | 25 | }; |
27 | 26 | ||
@@ -75,22 +74,6 @@ static void __free_fdtable(struct fdtable *fdt) | |||
75 | kfree(fdt); | 74 | kfree(fdt); |
76 | } | 75 | } |
77 | 76 | ||
78 | static void fdtable_timer(unsigned long data) | ||
79 | { | ||
80 | struct fdtable_defer *fddef = (struct fdtable_defer *)data; | ||
81 | |||
82 | spin_lock(&fddef->lock); | ||
83 | /* | ||
84 | * If someone already emptied the queue return. | ||
85 | */ | ||
86 | if (!fddef->next) | ||
87 | goto out; | ||
88 | if (!schedule_work(&fddef->wq)) | ||
89 | mod_timer(&fddef->timer, 5); | ||
90 | out: | ||
91 | spin_unlock(&fddef->lock); | ||
92 | } | ||
93 | |||
94 | static void free_fdtable_work(struct work_struct *work) | 77 | static void free_fdtable_work(struct work_struct *work) |
95 | { | 78 | { |
96 | struct fdtable_defer *f = | 79 | struct fdtable_defer *f = |
@@ -144,13 +127,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu) | |||
144 | spin_lock(&fddef->lock); | 127 | spin_lock(&fddef->lock); |
145 | fdt->next = fddef->next; | 128 | fdt->next = fddef->next; |
146 | fddef->next = fdt; | 129 | fddef->next = fdt; |
147 | /* | 130 | /* vmallocs are handled from the workqueue context */ |
148 | * vmallocs are handled from the workqueue context. | 131 | schedule_work(&fddef->wq); |
149 | * If the per-cpu workqueue is running, then we | ||
150 | * defer work scheduling through a timer. | ||
151 | */ | ||
152 | if (!schedule_work(&fddef->wq)) | ||
153 | mod_timer(&fddef->timer, 5); | ||
154 | spin_unlock(&fddef->lock); | 132 | spin_unlock(&fddef->lock); |
155 | put_cpu_var(fdtable_defer_list); | 133 | put_cpu_var(fdtable_defer_list); |
156 | } | 134 | } |
@@ -354,9 +332,6 @@ static void __devinit fdtable_defer_list_init(int cpu) | |||
354 | struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); | 332 | struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); |
355 | spin_lock_init(&fddef->lock); | 333 | spin_lock_init(&fddef->lock); |
356 | INIT_WORK(&fddef->wq, free_fdtable_work); | 334 | INIT_WORK(&fddef->wq, free_fdtable_work); |
357 | init_timer(&fddef->timer); | ||
358 | fddef->timer.data = (unsigned long)fddef; | ||
359 | fddef->timer.function = fdtable_timer; | ||
360 | fddef->next = NULL; | 335 | fddef->next = NULL; |
361 | } | 336 | } |
362 | 337 | ||