author	Oleg Nesterov <oleg@tv-sign.ru>	2006-03-31 05:30:30 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-31 15:18:52 -0500
commit	3691c5199e8a4be1c7a91b5ab925db5feb866e19 (patch)
tree	025210545818b080df13faa31c9266432579794c /kernel
parent	5ce29646ebe352587e3b3160d599010c5da1b9dd (diff)

[PATCH] kill __init_timer_base in favor of boot_tvec_bases

Commit a4a6198b80cf82eb8160603c98da218d1bd5e104 ("[PATCH] tvec_bases too large
for per-cpu data") introduced "struct tvec_t_base_s boot_tvec_bases", which is
visible at compile time.  This means we can kill __init_timer_base and move
timer_base_s's content into tvec_t_base_s.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
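In short, the patch folds the old two-level structure into one: the lock and
running_timer fields that used to live in the embedded timer_base_s now sit
directly in the per-CPU base, so timer->base can point at the tvec_base_t
itself.  A rough before/after sketch of the layout, with field names taken
from the diff below (the trailing tv3..tv5 vectors and alignment attributes
are left out of the sketch):

/* Before this patch: timers were locked through the embedded t_base. */
struct timer_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
};

struct tvec_t_base_s {
	struct timer_base_s t_base;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	/* ... tv3, tv4, tv5 ... */
};

/* After this patch: one flat structure; timer->base points here directly. */
struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	/* ... tv3, tv4, tv5 ... */
};

This is why every "base->t_base.lock" in kernel/timer.c below becomes
"base->lock", and why the container_of() dance in __mod_timer() disappears.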
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/timer.c	84
1 file changed, 35 insertions(+), 49 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index ab189dd187cb..b04dc03b5934 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(jiffies_64);
 /*
  * per-CPU timer vector definitions:
  */
-
 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
 #define TVN_SIZE (1 << TVN_BITS)
@@ -62,11 +61,6 @@ EXPORT_SYMBOL(jiffies_64);
 #define TVN_MASK (TVN_SIZE - 1)
 #define TVR_MASK (TVR_SIZE - 1)
 
-struct timer_base_s {
-	spinlock_t lock;
-	struct timer_list *running_timer;
-};
-
 typedef struct tvec_s {
 	struct list_head vec[TVN_SIZE];
 } tvec_t;
@@ -76,7 +70,8 @@ typedef struct tvec_root_s {
 } tvec_root_t;
 
 struct tvec_t_base_s {
-	struct timer_base_s t_base;
+	spinlock_t lock;
+	struct timer_list *running_timer;
 	unsigned long timer_jiffies;
 	tvec_root_t tv1;
 	tvec_t tv2;
@@ -87,13 +82,14 @@ struct tvec_t_base_s {
 
 typedef struct tvec_t_base_s tvec_base_t;
 static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
-static tvec_base_t boot_tvec_bases;
+tvec_base_t boot_tvec_bases;
+EXPORT_SYMBOL(boot_tvec_bases);
 
 static inline void set_running_timer(tvec_base_t *base,
 					struct timer_list *timer)
 {
 #ifdef CONFIG_SMP
-	base->t_base.running_timer = timer;
+	base->running_timer = timer;
 #endif
 }
 
@@ -139,15 +135,6 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 	list_add_tail(&timer->entry, vec);
 }
 
-typedef struct timer_base_s timer_base_t;
-/*
- * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
- * at compile time, and we need timer->base to lock the timer.
- */
-timer_base_t __init_timer_base
-	____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
-EXPORT_SYMBOL(__init_timer_base);
-
 /***
  * init_timer - initialize a timer.
  * @timer: the timer to be initialized
@@ -158,7 +145,7 @@ EXPORT_SYMBOL(__init_timer_base);
 void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
-	timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base;
+	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
 }
 EXPORT_SYMBOL(init_timer);
 
@@ -174,7 +161,7 @@ static inline void detach_timer(struct timer_list *timer,
 }
 
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
+ * We are using hashed locking: holding per_cpu(tvec_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -185,10 +172,10 @@ static inline void detach_timer(struct timer_list *timer,
  * possible to set timer->base = NULL and drop the lock: the timer remains
  * locked.
  */
-static timer_base_t *lock_timer_base(struct timer_list *timer,
+static tvec_base_t *lock_timer_base(struct timer_list *timer,
 			       unsigned long *flags)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 
 	for (;;) {
 		base = timer->base;
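For context on the hashed-locking comment above: the body of
lock_timer_base() is untouched by this patch apart from the type rename, and
the retry loop it contains (sketched here from the surrounding code, not part
of this diff) reads roughly:

static tvec_base_t *lock_timer_base(struct timer_list *timer,
				    unsigned long *flags)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;	/* base did not change under us */
			/* The timer migrated to another base; unlock and retry. */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}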
@@ -205,8 +192,7 @@ static timer_base_t *lock_timer_base(struct timer_list *timer,
 
 int __mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	timer_base_t *base;
-	tvec_base_t *new_base;
+	tvec_base_t *base, *new_base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -221,7 +207,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	new_base = __get_cpu_var(tvec_bases);
 
-	if (base != &new_base->t_base) {
+	if (base != new_base) {
 		/*
 		 * We are trying to schedule the timer on the local CPU.
 		 * However we can't change timer's base while it is running,
@@ -231,19 +217,19 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 		 */
 		if (unlikely(base->running_timer == timer)) {
 			/* The timer remains on a former base */
-			new_base = container_of(base, tvec_base_t, t_base);
+			new_base = base;
 		} else {
 			/* See the comment in lock_timer_base() */
 			timer->base = NULL;
 			spin_unlock(&base->lock);
-			spin_lock(&new_base->t_base.lock);
-			timer->base = &new_base->t_base;
+			spin_lock(&new_base->lock);
+			timer->base = new_base;
 		}
 	}
 
 	timer->expires = expires;
 	internal_add_timer(new_base, timer);
-	spin_unlock_irqrestore(&new_base->t_base.lock, flags);
+	spin_unlock_irqrestore(&new_base->lock, flags);
 
 	return ret;
 }
@@ -263,10 +249,10 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	unsigned long flags;
 
 	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->t_base.lock, flags);
-	timer->base = &base->t_base;
+	spin_lock_irqsave(&base->lock, flags);
+	timer->base = base;
 	internal_add_timer(base, timer);
-	spin_unlock_irqrestore(&base->t_base.lock, flags);
+	spin_unlock_irqrestore(&base->lock, flags);
 }
 
 
@@ -319,7 +305,7 @@ EXPORT_SYMBOL(mod_timer);
  */
 int del_timer(struct timer_list *timer)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -346,7 +332,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 	unsigned long flags;
 	int ret = -1;
 
@@ -410,7 +396,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
 		struct timer_list *tmp;
 
 		tmp = list_entry(curr, struct timer_list, entry);
-		BUG_ON(tmp->base != &base->t_base);
+		BUG_ON(tmp->base != base);
 		curr = curr->next;
 		internal_add_timer(base, tmp);
 	}
@@ -432,7 +418,7 @@ static inline void __run_timers(tvec_base_t *base)
 {
 	struct timer_list *timer;
 
-	spin_lock_irq(&base->t_base.lock);
+	spin_lock_irq(&base->lock);
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
 		struct list_head work_list = LIST_HEAD_INIT(work_list);
 		struct list_head *head = &work_list;
@@ -458,7 +444,7 @@ static inline void __run_timers(tvec_base_t *base)
 
 			set_running_timer(base, timer);
 			detach_timer(timer, 1);
-			spin_unlock_irq(&base->t_base.lock);
+			spin_unlock_irq(&base->lock);
 			{
 				int preempt_count = preempt_count();
 				fn(data);
@@ -471,11 +457,11 @@ static inline void __run_timers(tvec_base_t *base)
 					BUG();
 				}
 			}
-			spin_lock_irq(&base->t_base.lock);
+			spin_lock_irq(&base->lock);
 		}
 	}
 	set_running_timer(base, NULL);
-	spin_unlock_irq(&base->t_base.lock);
+	spin_unlock_irq(&base->lock);
 }
 
 #ifdef CONFIG_NO_IDLE_HZ
@@ -506,7 +492,7 @@ unsigned long next_timer_interrupt(void)
 	hr_expires += jiffies;
 
 	base = __get_cpu_var(tvec_bases);
-	spin_lock(&base->t_base.lock);
+	spin_lock(&base->lock);
 	expires = base->timer_jiffies + (LONG_MAX >> 1);
 	list = NULL;
 
@@ -554,7 +540,7 @@ found:
 				expires = nte->expires;
 		}
 	}
-	spin_unlock(&base->t_base.lock);
+	spin_unlock(&base->lock);
 
 	if (time_before(hr_expires, expires))
 		return hr_expires;
@@ -1262,7 +1248,7 @@ static int __devinit init_timers_cpu(int cpu)
 		}
 		per_cpu(tvec_bases, cpu) = base;
 	}
-	spin_lock_init(&base->t_base.lock);
+	spin_lock_init(&base->lock);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
 		INIT_LIST_HEAD(base->tv4.vec + j);
@@ -1284,7 +1270,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
 	while (!list_empty(head)) {
 		timer = list_entry(head->next, struct timer_list, entry);
 		detach_timer(timer, 0);
-		timer->base = &new_base->t_base;
+		timer->base = new_base;
 		internal_add_timer(new_base, timer);
 	}
 }
@@ -1300,11 +1286,11 @@ static void __devinit migrate_timers(int cpu)
 	new_base = get_cpu_var(tvec_bases);
 
 	local_irq_disable();
-	spin_lock(&new_base->t_base.lock);
-	spin_lock(&old_base->t_base.lock);
+	spin_lock(&new_base->lock);
+	spin_lock(&old_base->lock);
+
+	BUG_ON(old_base->running_timer);
 
-	if (old_base->t_base.running_timer)
-		BUG();
 	for (i = 0; i < TVR_SIZE; i++)
 		migrate_timer_list(new_base, old_base->tv1.vec + i);
 	for (i = 0; i < TVN_SIZE; i++) {
@@ -1314,8 +1300,8 @@ static void __devinit migrate_timers(int cpu)
 		migrate_timer_list(new_base, old_base->tv5.vec + i);
 	}
 
-	spin_unlock(&old_base->t_base.lock);
-	spin_unlock(&new_base->t_base.lock);
+	spin_unlock(&old_base->lock);
+	spin_unlock(&new_base->lock);
 	local_irq_enable();
 	put_cpu_var(tvec_bases);
 }
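A note on the new EXPORT_SYMBOL(boot_tvec_bases): statically initialized
timers need a base that exists at compile time, which is exactly the role
__init_timer_base used to play.  With it gone, the TIMER_INITIALIZER macro in
include/linux/timer.h has to point timer->base at &boot_tvec_bases instead,
and modules declaring static timers then reference that symbol.  The header
change is outside this 'kernel'-limited diffstat, so the following is only a
hedged sketch of what the counterpart presumably looks like, with hypothetical
driver-side names:

/* Sketch of the header-side counterpart (not shown in this diffstat). */
#define TIMER_INITIALIZER(_function, _expires, _data) {	\
		.function = (_function),		\
		.expires = (_expires),			\
		.data = (_data),			\
		.base = &boot_tvec_bases,		\
	}

/* Hypothetical usage in a module: the static initializer resolves
 * boot_tvec_bases at link time, hence the EXPORT_SYMBOL above. */
static void my_timeout(unsigned long data);	/* hypothetical handler */
static struct timer_list my_timer = TIMER_INITIALIZER(my_timeout, 0, 0);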