author		Ming Yang <yang@cs.unc.edu>	2016-02-10 10:58:42 -0500
committer	Ming Yang <yang@cs.unc.edu>	2016-02-10 18:05:12 -0500
commit		696546dd52d9baf73920a61e6525a41f3460ba4d (patch)
tree		7093c4b6ba8af2714101d400fdc3e987098c9d47
parent		3dfe2804d7ccc6b7f0e2e44175249b38299c83e7 (diff)
Patched the reservation implementation for the MC^2 version

Patched the reservation code using Namhoon's MC^2 implementation so that it
supports global multiprocessor scheduling. Added compiler-gcc5.h so the kernel
builds with gcc 5. Fixed a bug in which a large user-supplied CPU id could
crash the kernel.
-rw-r--r--	include/linux/compiler-gcc5.h	66
-rw-r--r--	include/litmus/reservation.h	65
-rw-r--r--	litmus/reservation.c	369
-rw-r--r--	litmus/sched_pres.c	5
4 files changed, 498 insertions(+), 7 deletions(-)
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644
index 000000000000..cec40d7428cf
--- /dev/null
+++ b/include/linux/compiler-gcc5.h
@@ -0,0 +1,66 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used				__attribute__((__used__))
+#define __must_check			__attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b)	__builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+   to them will be unlikely.  This means a lot of manual unlikely()s
+   are unnecessary now for any paths leading to the usual suspects
+   like BUG(), printk(), panic() etc. [but let's keep them for now for
+   older compilers]
+
+   Early snapshots of gcc 4.3 don't support this and we can't detect this
+   in the preprocessor, but we can live with this because they're unreleased.
+   Maketime probing would be overkill here.
+
+   gcc also has a __attribute__((__hot__)) to move hot functions into
+   a special section, but I don't see any sense in this right now in
+   the kernel context */
+#define __cold			__attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable.  This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased.  Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone	__attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
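
Note: kernels of this vintage select the version-specific compiler header by
token-pasting the value of __GNUC__ into an include path, which is why gcc 5
builds failed outright until compiler-gcc5.h existed. A rough sketch of the
mechanism, paraphrased from memory from include/linux/compiler-gcc.h of the
same era (not part of this patch):

	/* compiler-gcc.h derives the header name from __GNUC__; under gcc 5
	 * the include below expands to <linux/compiler-gcc5.h>, which did
	 * not exist before this patch, so the build stopped with an error. */
	#define __gcc_header(x) #x
	#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
	#define gcc_header(x) _gcc_header(x)
	#include gcc_header(__GNUC__)
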
diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
index 4eecd3f088e8..af1fba297bf5 100644
--- a/include/litmus/reservation.h
+++ b/include/litmus/reservation.h
@@ -126,6 +126,14 @@ struct reservation {
 	struct reservation_ops *ops;
 
 	struct list_head clients;
+
+	/* for global env. */
+	int scheduled_on;
+	int event_added;
+	/* blocked by a ghost job: do not charge budget while ACTIVE */
+	int blocked_by_ghost;
+	/* ghost job: if clear, do not charge budget while ACTIVE_IDLE */
+	int is_ghost;
 };
 
 void reservation_init(struct reservation *res);
@@ -185,10 +193,67 @@ struct sup_reservation_environment {
 void sup_init(struct sup_reservation_environment* sup_env);
 void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
 	struct reservation* new_res);
+/*
+ * TODO: taken from Namhoon's MC^2 implementation. This declaration is
+ * needed because the function is now used outside litmus/reservation.c.
+ */
+void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
+	lt_t timeout);
 void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
 struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 
 struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 	unsigned int id);
 
+/* A global multiprocessor reservation environment. */
+
+typedef enum {
+	EVENT_REPLENISH = 0,
+	EVENT_DRAIN,
+	EVENT_OTHERS,
+} event_type_t;
+
+
+struct next_timer_event {
+	lt_t next_update;
+	int timer_armed_on;
+	unsigned int id;
+	event_type_t type;
+	struct list_head list;
+};
+
+struct gmp_reservation_environment {
+	raw_spinlock_t lock;
+	struct reservation_environment env;
+
+	/* ordered by priority */
+	struct list_head active_reservations;
+
+	/* ordered by next_replenishment */
+	struct list_head depleted_reservations;
+
+	/* unordered */
+	struct list_head inactive_reservations;
+
+	/* timer events, ordered by next_update */
+	struct list_head next_events;
+
+	/* number of CPUs that should call gmp_dispatch() now */
+	int schedule_now;
+	/* set to true if a call to gmp_dispatch() is imminent */
+	bool will_schedule;
+};
+
+void gmp_init(struct gmp_reservation_environment* gmp_env);
+void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
+	struct reservation* new_res);
+void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
+	lt_t timeout, unsigned int id, event_type_t type);
+void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
+int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
+struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
+struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
+struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
+struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
+	unsigned int id);
 #endif
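
Note: the declarations above are the entire surface of the new global
environment. A minimal sketch of how a plugin might drive it, assuming the
locking discipline implied by the lock field; the surrounding plugin state,
the will_schedule handshake, and the caller-provided time source are
illustrative assumptions, not part of this patch:

	#include <litmus/reservation.h>

	/* Illustrative only: one way a plugin could drive the gmp_* API. */
	static struct gmp_reservation_environment gmp_env;

	static void plugin_setup(void)
	{
		gmp_init(&gmp_env);
	}

	static struct task_struct* plugin_schedule(lt_t now)
	{
		struct task_struct *next;
		unsigned long flags;

		raw_spin_lock_irqsave(&gmp_env.lock, flags);
		/* advance time; the return value says how many CPUs
		 * should reschedule */
		if (gmp_update_time(&gmp_env, now) > 0)
			gmp_env.will_schedule = true;
		next = gmp_dispatch(&gmp_env);
		gmp_env.will_schedule = false;
		raw_spin_unlock_irqrestore(&gmp_env.lock, flags);
		return next;
	}
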
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 0bc551e2e67b..08c74f9005b3 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -1,4 +1,5 @@
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/reservation.h>
@@ -52,7 +53,8 @@ static void sup_scheduler_update_at(
 	sup_env->next_scheduler_update = when;
 }
 
-static void sup_scheduler_update_after(
+/* changed from static to non-static because it is now used outside this file */
+void sup_scheduler_update_after(
 	struct sup_reservation_environment* sup_env,
 	lt_t timeout)
 {
@@ -192,8 +194,12 @@ static void sup_charge_budget(
 		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
 		res = list_entry(pos, struct reservation, list);
 		if (res->state == RESERVATION_ACTIVE) {
-			res->ops->drain_budget(res, delta);
-			encountered_active = 1;
+			TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
+			if (encountered_active == 0 && res->blocked_by_ghost == 0) {
+				TRACE("DRAIN !!\n");
+				res->ops->drain_budget(res, delta);
+				encountered_active = 1;
+			}
 		} else {
 			BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
 			res->ops->drain_budget(res, delta);
@@ -207,9 +213,9 @@ static void sup_charge_budget(
 				res->id, res->cur_budget);
 			sup_scheduler_update_after(sup_env, res->cur_budget);
 		}
-		if (encountered_active)
+		//if (encountered_active)
 			/* stop at the first ACTIVE reservation */
-			break;
+			//break;
 	}
 	TRACE("finished charging budgets\n");
 }
@@ -317,3 +323,356 @@ void sup_init(struct sup_reservation_environment* sup_env)
 
 	sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
 }
+
+/* NOTE: Namhoon's implementation starts below. */
+struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
+	unsigned int id)
+{
+	struct reservation *res;
+
+	list_for_each_entry(res, &gmp_env->active_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+	list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+	list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+
+	return NULL;
+}
+
+
+struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
+	unsigned int id)
+{
+	struct next_timer_event *event;
+
+	list_for_each_entry(event, &gmp_env->next_events, list) {
+		if (event->id == id)
+			return event;
+	}
+
+	return NULL;
+}
+
+
+struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
+	lt_t when)
+{
+	struct next_timer_event *event;
+
+	list_for_each_entry(event, &gmp_env->next_events, list) {
+		if (event->next_update == when)
+			return event;
+	}
+
+	return NULL;
+}
+
+#define TIMER_RESOLUTION 100000L
+
+static void gmp_add_event(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t when, unsigned int id, event_type_t type)
+{
+	struct next_timer_event *nevent, *queued;
+	struct list_head *pos;
+	int found = 0;
+
+	//when = div64_u64(when, TIMER_RESOLUTION);
+	//when *= TIMER_RESOLUTION;
+	//printk(KERN_ALERT "GMP_ADD id=%d type=%d when=%llu\n", id, type, when);
+	nevent = gmp_find_event_by_id(gmp_env, id);
+
+	if (!nevent || nevent->type != type) {
+		nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
+		BUG_ON(!nevent);
+		nevent->next_update = when;
+		nevent->id = id;
+		nevent->type = type;
+		nevent->timer_armed_on = NO_CPU;
+
+		list_for_each(pos, &gmp_env->next_events) {
+			queued = list_entry(pos, struct next_timer_event, list);
+			if (queued->next_update > nevent->next_update) {
+				list_add(&nevent->list, pos->prev);
+				found = 1;
+				TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
+				break;
+			}
+		}
+
+		if (!found) {
+			list_add_tail(&nevent->list, &gmp_env->next_events);
+			TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
+		}
+	} else {
+		//TRACE("EVENT FOUND id=%d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
+		//printk(KERN_ALERT "EVENT FOUND id=%d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
+	}
+
+	//TRACE("======START PRINTING EVENT LIST======\n");
+	//gmp_print_events(gmp_env, litmus_clock());
+	//TRACE("======FINISH PRINTING EVENT LIST======\n");
+}
+
+void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
+	lt_t timeout, unsigned int id, event_type_t type)
+{
+	//printk(KERN_ALERT "ADD_EVENT_AFTER id = %d\n", id);
+	gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
+}
+
+static void gmp_queue_depleted(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int found = 0;
+
+	//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
+
+	list_for_each(pos, &gmp_env->depleted_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued && (queued->next_replenishment > res->next_replenishment)) {
+			//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
+			list_add(&res->list, pos->prev);
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		list_add_tail(&res->list, &gmp_env->depleted_reservations);
+
+	TRACE("R%d queued to depleted_list\n", res->id);
+	//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
+	gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
+}
+
+static void gmp_queue_active(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int check_preempt = 1, found = 0;
+
+	list_for_each(pos, &gmp_env->active_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued->priority > res->priority) {
+			list_add(&res->list, pos->prev);
+			found = 1;
+			break;
+		} else if (queued->scheduled_on == NO_CPU)
+			check_preempt = 0;
+	}
+
+	if (!found)
+		list_add_tail(&res->list, &gmp_env->active_reservations);
+
+	/* check for possible preemption */
+	if (res->state == RESERVATION_ACTIVE && check_preempt)
+		gmp_env->schedule_now++;
+
+	gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
+	res->event_added = 1;
+}
+
+static void gmp_queue_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+
+	//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n", __FUNCTION__, __LINE__, gmp_env, res->id, res->state);
+	switch (res->state) {
+		case RESERVATION_INACTIVE:
+			list_add(&res->list, &gmp_env->inactive_reservations);
+			break;
+
+		case RESERVATION_DEPLETED:
+			gmp_queue_depleted(gmp_env, res);
+			break;
+
+		case RESERVATION_ACTIVE_IDLE:
+		case RESERVATION_ACTIVE:
+			gmp_queue_active(gmp_env, res);
+			break;
+	}
+}
+
+void gmp_add_new_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation* new_res)
+{
+	new_res->env = &gmp_env->env;
+	gmp_queue_reservation(gmp_env, new_res);
+}
+
+static void gmp_charge_budget(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t delta)
+{
+	struct list_head *pos, *next;
+	struct reservation *res;
+
+	list_for_each_safe(pos, next, &gmp_env->active_reservations) {
+		int drained = 0;
+		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
+		res = list_entry(pos, struct reservation, list);
+		if (res->state == RESERVATION_ACTIVE) {
+			TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
+			if (res->scheduled_on != NO_CPU
+			    && res->blocked_by_ghost == 0) {
+				TRACE("DRAIN !!\n");
+				drained = 1;
+				res->ops->drain_budget(res, delta);
+			} else {
+				TRACE("NO DRAIN (not scheduled)!!\n");
+			}
+		} else {
+			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
+			if (res->state != RESERVATION_ACTIVE_IDLE)
+				TRACE("BUG! unexpected state in gmp_charge_budget()\n");
+			TRACE("gmp_charge_budget ACTIVE_IDLE R%u drain %llu\n",
+				res->id, delta);
+			//if (res->is_ghost == 1) {
+				TRACE("DRAIN !!\n");
+				drained = 1;
+				res->ops->drain_budget(res, delta);
+			//}
+		}
+		if ((res->state == RESERVATION_ACTIVE ||
+			res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
+		{
+			/* make sure the scheduler is invoked when this reservation
+			 * exhausts its remaining budget */
+			TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
+			gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
+			res->event_added = 1;
+		}
+		//if (encountered_active == 2)
+			/* stop at the first ACTIVE reservation */
+			//break;
+	}
+	TRACE("finished charging budgets\n");
+}
+
+static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
+{
+	struct list_head *pos, *next;
+	struct reservation *res;
+
+	list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
+		res = list_entry(pos, struct reservation, list);
+		if (res->next_replenishment <= gmp_env->env.current_time) {
+			res->ops->replenish(res);
+		} else {
+			/* list is ordered by increasing replenishment times */
+			break;
+		}
+	}
+	TRACE("finished replenishing budgets\n");
+}
+
+#define EPSILON 50
+
+/* returns the number of CPUs that should reschedule (schedule_now) */
+int gmp_update_time(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t now)
+{
+	struct next_timer_event *event, *next;
+	lt_t delta, ret;
+
+	/* If the time didn't advance, there is nothing to do.
+	 * This check makes it safe to call gmp_update_time() potentially
+	 * multiple times (e.g., via different code paths). */
+	//TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
+	if (unlikely(now <= gmp_env->env.current_time + EPSILON))
+		return 0;
+
+	delta = now - gmp_env->env.current_time;
+	gmp_env->env.current_time = now;
+
+
+	//gmp_print_events(gmp_env, now);
+	/* deplete budgets by passage of time */
+	//TRACE("CHARGE###\n");
+	gmp_charge_budget(gmp_env, delta);
+
+	/* check if any budgets were replenished */
+	//TRACE("REPLENISH###\n");
+	gmp_replenish_budgets(gmp_env);
+
+
+	list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
+		if (event->next_update < now) {
+			list_del(&event->list);
+			TRACE("EVENT at %llu IS DELETED\n", event->next_update);
+			kfree(event);
+		} else {
+			break;
+		}
+	}
+
+	//gmp_print_events(gmp_env, litmus_clock());
+
+	ret = min(gmp_env->schedule_now, NR_CPUS);
+	gmp_env->schedule_now = 0;
+
+	return ret;
+}
+
+void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
+{
+	struct next_timer_event *event, *next;
+
+	TRACE("GLOBAL EVENTS now=%llu\n", now);
+	list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
+		TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
+	}
+}
+
+static void gmp_res_change_state(
+	struct reservation_environment* env,
+	struct reservation *res,
+	reservation_state_t new_state)
+{
+	struct gmp_reservation_environment* gmp_env;
+
+	gmp_env = container_of(env, struct gmp_reservation_environment, env);
+
+	TRACE("GMP reservation R%d state %d->%d at %llu\n",
+		res->id, res->state, new_state, env->current_time);
+
+	list_del(&res->list);
+	/* check if we need to reschedule because we lost an active reservation */
+	if (res->state == RESERVATION_ACTIVE)
+		gmp_env->schedule_now++;
+	res->state = new_state;
+	gmp_queue_reservation(gmp_env, res);
+}
+
+void gmp_init(struct gmp_reservation_environment* gmp_env)
+{
+	memset(gmp_env, 0, sizeof(*gmp_env));
+
+	INIT_LIST_HEAD(&gmp_env->active_reservations);
+	INIT_LIST_HEAD(&gmp_env->depleted_reservations);
+	INIT_LIST_HEAD(&gmp_env->inactive_reservations);
+	INIT_LIST_HEAD(&gmp_env->next_events);
+
+	gmp_env->env.change_state = gmp_res_change_state;
+
+	gmp_env->schedule_now = 0;
+	gmp_env->will_schedule = false;
+
+	raw_spin_lock_init(&gmp_env->lock);
+}
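
Note: the next_events queue replaces the single next_scheduler_update field
of the uniprocessor environment, so some caller must turn the head of that
queue into an armed timer. A sketch of one way a plugin could do this; the
hrtimer usage and the arm_next_event() helper are assumptions about the
caller, not part of this patch:

	/* Illustrative only: arm a timer for the earliest queued event. */
	static void arm_next_event(struct gmp_reservation_environment *gmp_env,
		struct hrtimer *timer)
	{
		struct next_timer_event *event;
		unsigned long flags;

		raw_spin_lock_irqsave(&gmp_env->lock, flags);
		event = list_first_entry_or_null(&gmp_env->next_events,
			struct next_timer_event, list);
		if (event && event->timer_armed_on == NO_CPU) {
			/* record the owning CPU, as timer_armed_on intends */
			event->timer_armed_on = smp_processor_id();
			hrtimer_start(timer, ns_to_ktime(event->next_update),
				HRTIMER_MODE_ABS_PINNED);
		}
		raw_spin_unlock_irqrestore(&gmp_env->lock, flags);
	}
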
diff --git a/litmus/sched_pres.c b/litmus/sched_pres.c
index e2d48101c890..23fe8f3ebfa9 100644
--- a/litmus/sched_pres.c
+++ b/litmus/sched_pres.c
@@ -392,7 +392,6 @@ static long create_polling_reservation(
 	int use_edf = config->priority == LITMUS_NO_PRIORITY;
 	int periodic = res_type == PERIODIC_POLLING;
 	long err = -EINVAL;
-
 	if (config->polling_params.budget >
 			config->polling_params.period) {
 		printk(KERN_ERR "invalid polling reservation (%u): "
@@ -558,7 +557,9 @@ static long pres_reservation_create(int res_type, void* __user _config)
 	if (copy_from_user(&config, _config, sizeof(config)))
 		return -EFAULT;
 
-	if (config.cpu < 0 || !cpu_online(config.cpu)) {
+	if (config.cpu < 0
+		|| config.cpu >= nr_cpu_ids /* reject huge ids that could crash the kernel */
+		|| !cpu_online(config.cpu)) {
 		printk(KERN_ERR "invalid polling reservation (%u): "
 			"CPU %d offline\n", config.id, config.cpu);
 		return -EINVAL;
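
Note: this added bound is the crash fix mentioned in the commit message.
cpu_online() tests a bit in cpu_online_mask, whose valid bits end at
nr_cpu_ids, so an unchecked large id from userspace reads past the bitmap
and is later used to index per-CPU scheduler state. The added test,
distilled into a hypothetical helper for illustration only:

	/* Hypothetical helper, equivalent to the check added above. */
	static inline bool valid_reservation_cpu(int cpu)
	{
		return cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu);
	}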