about summary refs log tree commit diff stats
path: root/drivers/char/mmtimer.c
diff options
context:
space:
mode:
authorDimitri Sivanich <sivanich@sgi.com>2008-04-30 03:53:35 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-30 11:29:41 -0400
commitcbacdd9572285c86848dd323dc764abb3681ddbc (patch)
treed0568712f7371598cd5d7b43693b04f99f9265ef /drivers/char/mmtimer.c
parentd17468c73e138e1108b279acf892dd35937d43ed (diff)
SGI Altix mmtimer: allow larger number of timers per node
The purpose of this patch to the SGI Altix specific mmtimer (posix timer) driver is to allow a virtually infinite number of timers to be set per node. Timers will now be kept on a sorted per-node list and a single node-based hardware comparator is used to trigger the next timer. [akpm@linux-foundation.org: mark things static] [akpm@linux-foundation.org: fix warning] Signed-off-by: Dimitri Sivanich <sivanich@sgi.com> Cc: "Luck, Tony" <tony.luck@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/char/mmtimer.c')
-rw-r--r--  drivers/char/mmtimer.c  400
1 files changed, 244 insertions, 156 deletions
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index e60a74c66e3d..d83db5d880e0 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -74,9 +74,8 @@ static const struct file_operations mmtimer_fops = {
74 * We only have comparison registers RTC1-4 currently available per 74 * We only have comparison registers RTC1-4 currently available per
75 * node. RTC0 is used by SAL. 75 * node. RTC0 is used by SAL.
76 */ 76 */
77#define NUM_COMPARATORS 3
78/* Check for an RTC interrupt pending */ 77/* Check for an RTC interrupt pending */
79static int inline mmtimer_int_pending(int comparator) 78static int mmtimer_int_pending(int comparator)
80{ 79{
81 if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & 80 if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
82 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) 81 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
@@ -84,15 +83,16 @@ static int inline mmtimer_int_pending(int comparator)
84 else 83 else
85 return 0; 84 return 0;
86} 85}
86
87/* Clear the RTC interrupt pending bit */ 87/* Clear the RTC interrupt pending bit */
88static void inline mmtimer_clr_int_pending(int comparator) 88static void mmtimer_clr_int_pending(int comparator)
89{ 89{
90 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), 90 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
91 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); 91 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
92} 92}
93 93
94/* Setup timer on comparator RTC1 */ 94/* Setup timer on comparator RTC1 */
95static void inline mmtimer_setup_int_0(u64 expires) 95static void mmtimer_setup_int_0(int cpu, u64 expires)
96{ 96{
97 u64 val; 97 u64 val;
98 98
@@ -106,7 +106,7 @@ static void inline mmtimer_setup_int_0(u64 expires)
106 mmtimer_clr_int_pending(0); 106 mmtimer_clr_int_pending(0);
107 107
108 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | 108 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
109 ((u64)cpu_physical_id(smp_processor_id()) << 109 ((u64)cpu_physical_id(cpu) <<
110 SH_RTC1_INT_CONFIG_PID_SHFT); 110 SH_RTC1_INT_CONFIG_PID_SHFT);
111 111
112 /* Set configuration */ 112 /* Set configuration */
@@ -122,7 +122,7 @@ static void inline mmtimer_setup_int_0(u64 expires)
122} 122}
123 123
124/* Setup timer on comparator RTC2 */ 124/* Setup timer on comparator RTC2 */
125static void inline mmtimer_setup_int_1(u64 expires) 125static void mmtimer_setup_int_1(int cpu, u64 expires)
126{ 126{
127 u64 val; 127 u64 val;
128 128
@@ -133,7 +133,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
133 mmtimer_clr_int_pending(1); 133 mmtimer_clr_int_pending(1);
134 134
135 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | 135 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
136 ((u64)cpu_physical_id(smp_processor_id()) << 136 ((u64)cpu_physical_id(cpu) <<
137 SH_RTC2_INT_CONFIG_PID_SHFT); 137 SH_RTC2_INT_CONFIG_PID_SHFT);
138 138
139 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); 139 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);
@@ -144,7 +144,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
144} 144}
145 145
146/* Setup timer on comparator RTC3 */ 146/* Setup timer on comparator RTC3 */
147static void inline mmtimer_setup_int_2(u64 expires) 147static void mmtimer_setup_int_2(int cpu, u64 expires)
148{ 148{
149 u64 val; 149 u64 val;
150 150
@@ -155,7 +155,7 @@ static void inline mmtimer_setup_int_2(u64 expires)
155 mmtimer_clr_int_pending(2); 155 mmtimer_clr_int_pending(2);
156 156
157 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | 157 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
158 ((u64)cpu_physical_id(smp_processor_id()) << 158 ((u64)cpu_physical_id(cpu) <<
159 SH_RTC3_INT_CONFIG_PID_SHFT); 159 SH_RTC3_INT_CONFIG_PID_SHFT);
160 160
161 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); 161 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);
@@ -170,22 +170,22 @@ static void inline mmtimer_setup_int_2(u64 expires)
170 * in order to insure that the setup succeeds in a deterministic time frame. 170 * in order to insure that the setup succeeds in a deterministic time frame.
171 * It will check if the interrupt setup succeeded. 171 * It will check if the interrupt setup succeeded.
172 */ 172 */
173static int inline mmtimer_setup(int comparator, unsigned long expires) 173static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
174{ 174{
175 175
176 switch (comparator) { 176 switch (comparator) {
177 case 0: 177 case 0:
178 mmtimer_setup_int_0(expires); 178 mmtimer_setup_int_0(cpu, expires);
179 break; 179 break;
180 case 1: 180 case 1:
181 mmtimer_setup_int_1(expires); 181 mmtimer_setup_int_1(cpu, expires);
182 break; 182 break;
183 case 2: 183 case 2:
184 mmtimer_setup_int_2(expires); 184 mmtimer_setup_int_2(cpu, expires);
185 break; 185 break;
186 } 186 }
187 /* We might've missed our expiration time */ 187 /* We might've missed our expiration time */
188 if (rtc_time() < expires) 188 if (rtc_time() <= expires)
189 return 1; 189 return 1;
190 190
191 /* 191 /*
@@ -195,7 +195,7 @@ static int inline mmtimer_setup(int comparator, unsigned long expires)
195 return mmtimer_int_pending(comparator); 195 return mmtimer_int_pending(comparator);
196} 196}
197 197
198static int inline mmtimer_disable_int(long nasid, int comparator) 198static int mmtimer_disable_int(long nasid, int comparator)
199{ 199{
200 switch (comparator) { 200 switch (comparator) {
201 case 0: 201 case 0:
@@ -216,18 +216,124 @@ static int inline mmtimer_disable_int(long nasid, int comparator)
216 return 0; 216 return 0;
217} 217}
218 218
219#define TIMER_OFF 0xbadcabLL 219#define COMPARATOR 1 /* The comparator to use */
220 220
221/* There is one of these for each comparator */ 221#define TIMER_OFF 0xbadcabLL /* Timer is not setup */
222typedef struct mmtimer { 222#define TIMER_SET 0 /* Comparator is set for this timer */
223 spinlock_t lock ____cacheline_aligned; 223
224/* There is one of these for each timer */
225struct mmtimer {
226 struct rb_node list;
224 struct k_itimer *timer; 227 struct k_itimer *timer;
225 int i;
226 int cpu; 228 int cpu;
229};
230
231struct mmtimer_node {
232 spinlock_t lock ____cacheline_aligned;
233 struct rb_root timer_head;
234 struct rb_node *next;
227 struct tasklet_struct tasklet; 235 struct tasklet_struct tasklet;
228} mmtimer_t; 236};
237static struct mmtimer_node *timers;
238
239
240/*
241 * Add a new mmtimer struct to the node's mmtimer list.
242 * This function assumes the struct mmtimer_node is locked.
243 */
244static void mmtimer_add_list(struct mmtimer *n)
245{
246 int nodeid = n->timer->it.mmtimer.node;
247 unsigned long expires = n->timer->it.mmtimer.expires;
248 struct rb_node **link = &timers[nodeid].timer_head.rb_node;
249 struct rb_node *parent = NULL;
250 struct mmtimer *x;
251
252 /*
253 * Find the right place in the rbtree:
254 */
255 while (*link) {
256 parent = *link;
257 x = rb_entry(parent, struct mmtimer, list);
258
259 if (expires < x->timer->it.mmtimer.expires)
260 link = &(*link)->rb_left;
261 else
262 link = &(*link)->rb_right;
263 }
264
265 /*
266 * Insert the timer to the rbtree and check whether it
267 * replaces the first pending timer
268 */
269 rb_link_node(&n->list, parent, link);
270 rb_insert_color(&n->list, &timers[nodeid].timer_head);
271
272 if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
273 struct mmtimer, list)->timer->it.mmtimer.expires)
274 timers[nodeid].next = &n->list;
275}
276
277/*
278 * Set the comparator for the next timer.
279 * This function assumes the struct mmtimer_node is locked.
280 */
281static void mmtimer_set_next_timer(int nodeid)
282{
283 struct mmtimer_node *n = &timers[nodeid];
284 struct mmtimer *x;
285 struct k_itimer *t;
286 int o;
287
288restart:
289 if (n->next == NULL)
290 return;
229 291
230static mmtimer_t ** timers; 292 x = rb_entry(n->next, struct mmtimer, list);
293 t = x->timer;
294 if (!t->it.mmtimer.incr) {
295 /* Not an interval timer */
296 if (!mmtimer_setup(x->cpu, COMPARATOR,
297 t->it.mmtimer.expires)) {
298 /* Late setup, fire now */
299 tasklet_schedule(&n->tasklet);
300 }
301 return;
302 }
303
304 /* Interval timer */
305 o = 0;
306 while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
307 unsigned long e, e1;
308 struct rb_node *next;
309 t->it.mmtimer.expires += t->it.mmtimer.incr << o;
310 t->it_overrun += 1 << o;
311 o++;
312 if (o > 20) {
313 printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
314 t->it.mmtimer.clock = TIMER_OFF;
315 n->next = rb_next(&x->list);
316 rb_erase(&x->list, &n->timer_head);
317 kfree(x);
318 goto restart;
319 }
320
321 e = t->it.mmtimer.expires;
322 next = rb_next(&x->list);
323
324 if (next == NULL)
325 continue;
326
327 e1 = rb_entry(next, struct mmtimer, list)->
328 timer->it.mmtimer.expires;
329 if (e > e1) {
330 n->next = next;
331 rb_erase(&x->list, &n->timer_head);
332 mmtimer_add_list(x);
333 goto restart;
334 }
335 }
336}
231 337
232/** 338/**
233 * mmtimer_ioctl - ioctl interface for /dev/mmtimer 339 * mmtimer_ioctl - ioctl interface for /dev/mmtimer
@@ -390,35 +496,6 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
390 return 0; 496 return 0;
391} 497}
392 498
393/*
394 * Schedule the next periodic interrupt. This function will attempt
395 * to schedule a periodic interrupt later if necessary. If the scheduling
396 * of an interrupt fails then the time to skip is lengthened
397 * exponentially in order to ensure that the next interrupt
398 * can be properly scheduled..
399 */
400static int inline reschedule_periodic_timer(mmtimer_t *x)
401{
402 int n;
403 struct k_itimer *t = x->timer;
404
405 t->it.mmtimer.clock = x->i;
406 t->it_overrun--;
407
408 n = 0;
409 do {
410
411 t->it.mmtimer.expires += t->it.mmtimer.incr << n;
412 t->it_overrun += 1 << n;
413 n++;
414 if (n > 20)
415 return 1;
416
417 } while (!mmtimer_setup(x->i, t->it.mmtimer.expires));
418
419 return 0;
420}
421
422/** 499/**
423 * mmtimer_interrupt - timer interrupt handler 500 * mmtimer_interrupt - timer interrupt handler
424 * @irq: irq received 501 * @irq: irq received
@@ -435,71 +512,75 @@ static int inline reschedule_periodic_timer(mmtimer_t *x)
435static irqreturn_t 512static irqreturn_t
436mmtimer_interrupt(int irq, void *dev_id) 513mmtimer_interrupt(int irq, void *dev_id)
437{ 514{
438 int i;
439 unsigned long expires = 0; 515 unsigned long expires = 0;
440 int result = IRQ_NONE; 516 int result = IRQ_NONE;
441 unsigned indx = cpu_to_node(smp_processor_id()); 517 unsigned indx = cpu_to_node(smp_processor_id());
518 struct mmtimer *base;
442 519
443 /* 520 spin_lock(&timers[indx].lock);
444 * Do this once for each comparison register 521 base = rb_entry(timers[indx].next, struct mmtimer, list);
445 */ 522 if (base == NULL) {
446 for (i = 0; i < NUM_COMPARATORS; i++) { 523 spin_unlock(&timers[indx].lock);
447 mmtimer_t *base = timers[indx] + i; 524 return result;
448 /* Make sure this doesn't get reused before tasklet_sched */ 525 }
449 spin_lock(&base->lock); 526
450 if (base->cpu == smp_processor_id()) { 527 if (base->cpu == smp_processor_id()) {
451 if (base->timer) 528 if (base->timer)
452 expires = base->timer->it.mmtimer.expires; 529 expires = base->timer->it.mmtimer.expires;
453 /* expires test won't work with shared irqs */ 530 /* expires test won't work with shared irqs */
454 if ((mmtimer_int_pending(i) > 0) || 531 if ((mmtimer_int_pending(COMPARATOR) > 0) ||
455 (expires && (expires < rtc_time()))) { 532 (expires && (expires <= rtc_time()))) {
456 mmtimer_clr_int_pending(i); 533 mmtimer_clr_int_pending(COMPARATOR);
457 tasklet_schedule(&base->tasklet); 534 tasklet_schedule(&timers[indx].tasklet);
458 result = IRQ_HANDLED; 535 result = IRQ_HANDLED;
459 }
460 } 536 }
461 spin_unlock(&base->lock);
462 expires = 0;
463 } 537 }
538 spin_unlock(&timers[indx].lock);
464 return result; 539 return result;
465} 540}
466 541
467void mmtimer_tasklet(unsigned long data) { 542static void mmtimer_tasklet(unsigned long data)
468 mmtimer_t *x = (mmtimer_t *)data; 543{
469 struct k_itimer *t = x->timer; 544 int nodeid = data;
545 struct mmtimer_node *mn = &timers[nodeid];
546 struct mmtimer *x = rb_entry(mn->next, struct mmtimer, list);
547 struct k_itimer *t;
470 unsigned long flags; 548 unsigned long flags;
471 549
472 if (t == NULL)
473 return;
474
475 /* Send signal and deal with periodic signals */ 550 /* Send signal and deal with periodic signals */
476 spin_lock_irqsave(&t->it_lock, flags); 551 spin_lock_irqsave(&mn->lock, flags);
477 spin_lock(&x->lock); 552 if (!mn->next)
478 /* If timer was deleted between interrupt and here, leave */
479 if (t != x->timer)
480 goto out; 553 goto out;
481 t->it_overrun = 0;
482 554
483 if (posix_timer_event(t, 0) != 0) { 555 x = rb_entry(mn->next, struct mmtimer, list);
556 t = x->timer;
557
558 if (t->it.mmtimer.clock == TIMER_OFF)
559 goto out;
560
561 t->it_overrun = 0;
484 562
485 // printk(KERN_WARNING "mmtimer: cannot deliver signal.\n"); 563 mn->next = rb_next(&x->list);
564 rb_erase(&x->list, &mn->timer_head);
486 565
566 if (posix_timer_event(t, 0) != 0)
487 t->it_overrun++; 567 t->it_overrun++;
488 } 568
489 if(t->it.mmtimer.incr) { 569 if(t->it.mmtimer.incr) {
490 /* Periodic timer */ 570 t->it.mmtimer.expires += t->it.mmtimer.incr;
491 if (reschedule_periodic_timer(x)) { 571 mmtimer_add_list(x);
492 printk(KERN_WARNING "mmtimer: unable to reschedule\n");
493 x->timer = NULL;
494 }
495 } else { 572 } else {
496 /* Ensure we don't false trigger in mmtimer_interrupt */ 573 /* Ensure we don't false trigger in mmtimer_interrupt */
574 t->it.mmtimer.clock = TIMER_OFF;
497 t->it.mmtimer.expires = 0; 575 t->it.mmtimer.expires = 0;
576 kfree(x);
498 } 577 }
578 /* Set comparator for next timer, if there is one */
579 mmtimer_set_next_timer(nodeid);
580
499 t->it_overrun_last = t->it_overrun; 581 t->it_overrun_last = t->it_overrun;
500out: 582out:
501 spin_unlock(&x->lock); 583 spin_unlock_irqrestore(&mn->lock, flags);
502 spin_unlock_irqrestore(&t->it_lock, flags);
503} 584}
504 585
505static int sgi_timer_create(struct k_itimer *timer) 586static int sgi_timer_create(struct k_itimer *timer)
@@ -516,19 +597,50 @@ static int sgi_timer_create(struct k_itimer *timer)
516 */ 597 */
517static int sgi_timer_del(struct k_itimer *timr) 598static int sgi_timer_del(struct k_itimer *timr)
518{ 599{
519 int i = timr->it.mmtimer.clock;
520 cnodeid_t nodeid = timr->it.mmtimer.node; 600 cnodeid_t nodeid = timr->it.mmtimer.node;
521 mmtimer_t *t = timers[nodeid] + i;
522 unsigned long irqflags; 601 unsigned long irqflags;
523 602
524 if (i != TIMER_OFF) { 603 spin_lock_irqsave(&timers[nodeid].lock, irqflags);
525 spin_lock_irqsave(&t->lock, irqflags); 604 if (timr->it.mmtimer.clock != TIMER_OFF) {
526 mmtimer_disable_int(cnodeid_to_nasid(nodeid),i); 605 unsigned long expires = timr->it.mmtimer.expires;
527 t->timer = NULL; 606 struct rb_node *n = timers[nodeid].timer_head.rb_node;
607 struct mmtimer *uninitialized_var(t);
608 int r = 0;
609
528 timr->it.mmtimer.clock = TIMER_OFF; 610 timr->it.mmtimer.clock = TIMER_OFF;
529 timr->it.mmtimer.expires = 0; 611 timr->it.mmtimer.expires = 0;
530 spin_unlock_irqrestore(&t->lock, irqflags); 612
613 while (n) {
614 t = rb_entry(n, struct mmtimer, list);
615 if (t->timer == timr)
616 break;
617
618 if (expires < t->timer->it.mmtimer.expires)
619 n = n->rb_left;
620 else
621 n = n->rb_right;
622 }
623
624 if (!n) {
625 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
626 return 0;
627 }
628
629 if (timers[nodeid].next == n) {
630 timers[nodeid].next = rb_next(n);
631 r = 1;
632 }
633
634 rb_erase(n, &timers[nodeid].timer_head);
635 kfree(t);
636
637 if (r) {
638 mmtimer_disable_int(cnodeid_to_nasid(nodeid),
639 COMPARATOR);
640 mmtimer_set_next_timer(nodeid);
641 }
531 } 642 }
643 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
532 return 0; 644 return 0;
533} 645}
534 646
@@ -557,12 +669,11 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
557 struct itimerspec * new_setting, 669 struct itimerspec * new_setting,
558 struct itimerspec * old_setting) 670 struct itimerspec * old_setting)
559{ 671{
560
561 int i;
562 unsigned long when, period, irqflags; 672 unsigned long when, period, irqflags;
563 int err = 0; 673 int err = 0;
564 cnodeid_t nodeid; 674 cnodeid_t nodeid;
565 mmtimer_t *base; 675 struct mmtimer *base;
676 struct rb_node *n;
566 677
567 if (old_setting) 678 if (old_setting)
568 sgi_timer_get(timr, old_setting); 679 sgi_timer_get(timr, old_setting);
@@ -575,6 +686,10 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
575 /* Clear timer */ 686 /* Clear timer */
576 return 0; 687 return 0;
577 688
689 base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
690 if (base == NULL)
691 return -ENOMEM;
692
578 if (flags & TIMER_ABSTIME) { 693 if (flags & TIMER_ABSTIME) {
579 struct timespec n; 694 struct timespec n;
580 unsigned long now; 695 unsigned long now;
@@ -604,47 +719,38 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
604 preempt_disable(); 719 preempt_disable();
605 720
606 nodeid = cpu_to_node(smp_processor_id()); 721 nodeid = cpu_to_node(smp_processor_id());
607retry:
608 /* Don't use an allocated timer, or a deleted one that's pending */
609 for(i = 0; i< NUM_COMPARATORS; i++) {
610 base = timers[nodeid] + i;
611 if (!base->timer && !base->tasklet.state) {
612 break;
613 }
614 }
615
616 if (i == NUM_COMPARATORS) {
617 preempt_enable();
618 return -EBUSY;
619 }
620 722
621 spin_lock_irqsave(&base->lock, irqflags); 723 /* Lock the node timer structure */
724 spin_lock_irqsave(&timers[nodeid].lock, irqflags);
622 725
623 if (base->timer || base->tasklet.state != 0) {
624 spin_unlock_irqrestore(&base->lock, irqflags);
625 goto retry;
626 }
627 base->timer = timr; 726 base->timer = timr;
628 base->cpu = smp_processor_id(); 727 base->cpu = smp_processor_id();
629 728
630 timr->it.mmtimer.clock = i; 729 timr->it.mmtimer.clock = TIMER_SET;
631 timr->it.mmtimer.node = nodeid; 730 timr->it.mmtimer.node = nodeid;
632 timr->it.mmtimer.incr = period; 731 timr->it.mmtimer.incr = period;
633 timr->it.mmtimer.expires = when; 732 timr->it.mmtimer.expires = when;
634 733
635 if (period == 0) { 734 n = timers[nodeid].next;
636 if (!mmtimer_setup(i, when)) { 735
637 mmtimer_disable_int(-1, i); 736 /* Add the new struct mmtimer to node's timer list */
638 posix_timer_event(timr, 0); 737 mmtimer_add_list(base);
639 timr->it.mmtimer.expires = 0; 738
640 } 739 if (timers[nodeid].next == n) {
641 } else { 740 /* No need to reprogram comparator for now */
642 timr->it.mmtimer.expires -= period; 741 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
643 if (reschedule_periodic_timer(base)) 742 preempt_enable();
644 err = -EINVAL; 743 return err;
645 } 744 }
646 745
647 spin_unlock_irqrestore(&base->lock, irqflags); 746 /* We need to reprogram the comparator */
747 if (n)
748 mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);
749
750 mmtimer_set_next_timer(nodeid);
751
752 /* Unlock the node timer structure */
753 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
648 754
649 preempt_enable(); 755 preempt_enable();
650 756
@@ -669,7 +775,6 @@ static struct k_clock sgi_clock = {
669 */ 775 */
670static int __init mmtimer_init(void) 776static int __init mmtimer_init(void)
671{ 777{
672 unsigned i;
673 cnodeid_t node, maxn = -1; 778 cnodeid_t node, maxn = -1;
674 779
675 if (!ia64_platform_is("sn2")) 780 if (!ia64_platform_is("sn2"))
@@ -706,31 +811,18 @@ static int __init mmtimer_init(void)
706 maxn++; 811 maxn++;
707 812
708 /* Allocate list of node ptrs to mmtimer_t's */ 813 /* Allocate list of node ptrs to mmtimer_t's */
709 timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL); 814 timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
710 if (timers == NULL) { 815 if (timers == NULL) {
711 printk(KERN_ERR "%s: failed to allocate memory for device\n", 816 printk(KERN_ERR "%s: failed to allocate memory for device\n",
712 MMTIMER_NAME); 817 MMTIMER_NAME);
713 goto out3; 818 goto out3;
714 } 819 }
715 820
716 /* Allocate mmtimer_t's for each online node */ 821 /* Initialize struct mmtimer's for each online node */
717 for_each_online_node(node) { 822 for_each_online_node(node) {
718 timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); 823 spin_lock_init(&timers[node].lock);
719 if (timers[node] == NULL) { 824 tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
720 printk(KERN_ERR "%s: failed to allocate memory for device\n", 825 (unsigned long) node);
721 MMTIMER_NAME);
722 goto out4;
723 }
724 for (i=0; i< NUM_COMPARATORS; i++) {
725 mmtimer_t * base = timers[node] + i;
726
727 spin_lock_init(&base->lock);
728 base->timer = NULL;
729 base->cpu = 0;
730 base->i = i;
731 tasklet_init(&base->tasklet, mmtimer_tasklet,
732 (unsigned long) (base));
733 }
734 } 826 }
735 827
736 sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second; 828 sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
@@ -741,11 +833,8 @@ static int __init mmtimer_init(void)
741 833
742 return 0; 834 return 0;
743 835
744out4:
745 for_each_online_node(node) {
746 kfree(timers[node]);
747 }
748out3: 836out3:
837 kfree(timers);
749 misc_deregister(&mmtimer_miscdev); 838 misc_deregister(&mmtimer_miscdev);
750out2: 839out2:
751 free_irq(SGI_MMTIMER_VECTOR, NULL); 840 free_irq(SGI_MMTIMER_VECTOR, NULL);
@@ -754,4 +843,3 @@ out1:
754} 843}
755 844
756module_init(mmtimer_init); 845module_init(mmtimer_init);
757