aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/reservation.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/reservation.c')
-rw-r--r--litmus/reservation.c709
1 files changed, 709 insertions, 0 deletions
diff --git a/litmus/reservation.c b/litmus/reservation.c
new file mode 100644
index 000000000000..07e38cb7d138
--- /dev/null
+++ b/litmus/reservation.c
@@ -0,0 +1,709 @@
1#include <linux/sched.h>
2#include <linux/slab.h>
3
4#include <litmus/litmus.h>
5#include <litmus/reservation.h>
6
7//#define TRACE(fmt, args...) do {} while (false)
8//#define TRACE_TASK(fmt, args...) do {} while (false)
9
10#define BUDGET_ENFORCEMENT_AT_C 0
11
12void reservation_init(struct reservation *res)
13{
14 memset(res, sizeof(*res), 0);
15 res->state = RESERVATION_INACTIVE;
16 INIT_LIST_HEAD(&res->clients);
17}
18
19struct task_struct* default_dispatch_client(
20 struct reservation *res,
21 lt_t *for_at_most)
22{
23 struct reservation_client *client, *next;
24 struct task_struct* tsk;
25
26 BUG_ON(res->state != RESERVATION_ACTIVE);
27 *for_at_most = 0;
28
29 list_for_each_entry_safe(client, next, &res->clients, list) {
30 tsk = client->dispatch(client);
31 if (likely(tsk)) {
32 return tsk;
33 }
34 }
35 return NULL;
36}
37
38static struct task_struct * task_client_dispatch(struct reservation_client *client)
39{
40 struct task_client *tc = container_of(client, struct task_client, client);
41 return tc->task;
42}
43
44void task_client_init(struct task_client *tc, struct task_struct *tsk,
45 struct reservation *res)
46{
47 memset(&tc->client, sizeof(tc->client), 0);
48 tc->client.dispatch = task_client_dispatch;
49 tc->client.reservation = res;
50 tc->task = tsk;
51}
52
53static void sup_scheduler_update_at(
54 struct sup_reservation_environment* sup_env,
55 lt_t when)
56{
57 //TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when);
58 if (sup_env->next_scheduler_update > when)
59 sup_env->next_scheduler_update = when;
60}
61
/* Relative convenience wrapper: request a scheduler update 'timeout'
 * nanoseconds after the environment's current notion of time. */
void sup_scheduler_update_after(
	struct sup_reservation_environment* sup_env,
	lt_t timeout)
{
	sup_scheduler_update_at(sup_env, sup_env->env.current_time + timeout);
}
68
69static int _sup_queue_depleted(
70 struct sup_reservation_environment* sup_env,
71 struct reservation *res)
72{
73 struct list_head *pos;
74 struct reservation *queued;
75 int passed_earlier = 0;
76
77 list_for_each(pos, &sup_env->depleted_reservations) {
78 queued = list_entry(pos, struct reservation, list);
79 if (queued->next_replenishment > res->next_replenishment) {
80 list_add(&res->list, pos->prev);
81 return passed_earlier;
82 } else
83 passed_earlier = 1;
84 }
85
86 list_add_tail(&res->list, &sup_env->depleted_reservations);
87
88 return passed_earlier;
89}
90
91static void sup_queue_depleted(
92 struct sup_reservation_environment* sup_env,
93 struct reservation *res)
94{
95 int passed_earlier = _sup_queue_depleted(sup_env, res);
96
97 /* check for updated replenishment time */
98 if (!passed_earlier)
99 sup_scheduler_update_at(sup_env, res->next_replenishment);
100}
101
102static int _sup_queue_active(
103 struct sup_reservation_environment* sup_env,
104 struct reservation *res)
105{
106 struct list_head *pos;
107 struct reservation *queued;
108 int passed_active = 0;
109
110 list_for_each(pos, &sup_env->active_reservations) {
111 queued = list_entry(pos, struct reservation, list);
112 if (queued->priority > res->priority) {
113 list_add(&res->list, pos->prev);
114 return passed_active;
115 } else if (queued->state == RESERVATION_ACTIVE)
116 passed_active = 1;
117 }
118
119 list_add_tail(&res->list, &sup_env->active_reservations);
120 return passed_active;
121}
122
/* Queue an ACTIVE or ACTIVE_IDLE reservation and decide when the scheduler
 * must run next: immediately, if res is ACTIVE and reached the head of the
 * active portion of the queue (possible preemption); otherwise once res's
 * remaining budget would be drained. */
static void sup_queue_active(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	int passed_active = _sup_queue_active(sup_env, res);

	/* check for possible preemption */
	if (res->state == RESERVATION_ACTIVE && !passed_active)
		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
	else {
		/* Active means this reservation is draining budget => make sure
		 * the scheduler is called to notice when the reservation budget has been
		 * drained completely. */
		sup_scheduler_update_after(sup_env, res->cur_budget);
	}
}
139
140static void sup_queue_reservation(
141 struct sup_reservation_environment* sup_env,
142 struct reservation *res)
143{
144 switch (res->state) {
145 case RESERVATION_INACTIVE:
146 list_add(&res->list, &sup_env->inactive_reservations);
147 break;
148
149 case RESERVATION_DEPLETED:
150 sup_queue_depleted(sup_env, res);
151 break;
152
153 case RESERVATION_ACTIVE_IDLE:
154 case RESERVATION_ACTIVE:
155 sup_queue_active(sup_env, res);
156 break;
157 }
158}
159
/* Register a newly created reservation with a uniprocessor environment:
 * point it at the environment and enqueue it according to its state. */
void sup_add_new_reservation(
	struct sup_reservation_environment* sup_env,
	struct reservation* new_res)
{
	new_res->env = &sup_env->env;
	sup_queue_reservation(sup_env, new_res);
}
167
168struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
169 unsigned int id)
170{
171 struct reservation *res;
172
173 list_for_each_entry(res, &sup_env->active_reservations, list) {
174 if (res->id == id)
175 return res;
176 }
177 list_for_each_entry(res, &sup_env->inactive_reservations, list) {
178 if (res->id == id)
179 return res;
180 }
181 list_for_each_entry(res, &sup_env->depleted_reservations, list) {
182 if (res->id == id)
183 return res;
184 }
185
186 return NULL;
187}
188
/* Charge 'delta' nanoseconds of elapsed time against the reservations on
 * the active queue.  Every ACTIVE_IDLE reservation is drained; among the
 * ACTIVE ones only the first that is not blocked by a ghost job is drained
 * (only one reservation actually runs on a uniprocessor).  Any reservation
 * still ACTIVE/ACTIVE_IDLE afterwards gets a scheduler update scheduled at
 * its budget-exhaustion time. */
static void sup_charge_budget(
	struct sup_reservation_environment* sup_env,
	lt_t delta)
{
	struct list_head *pos, *next;
	struct reservation *res;

	/* set once the first eligible ACTIVE reservation has been drained */
	int encountered_active = 0;

	list_for_each_safe(pos, next, &sup_env->active_reservations) {
		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
		res = list_entry(pos, struct reservation, list);
		if (res->state == RESERVATION_ACTIVE) {
			TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
			if (encountered_active == 0 && res->blocked_by_ghost == 0) {
				TRACE("DRAIN !!\n");
				/* NOTE(review): drain_budget() may change res->state and
				 * requeue res — the _safe iterator tolerates removal of
				 * the current node; confirm against ops implementations. */
				res->ops->drain_budget(res, delta);
				encountered_active = 1;
			}
		} else {
			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
			TRACE("sup_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
			res->ops->drain_budget(res, delta);
		}
		if (res->state == RESERVATION_ACTIVE ||
			res->state == RESERVATION_ACTIVE_IDLE)
		{
			/* make sure scheduler is invoked when this reservation expires
			 * its remaining budget */
			TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n",
				res->id, res->cur_budget);
			sup_scheduler_update_after(sup_env, res->cur_budget);
		}
		//if (encountered_active == 2)
		/* stop at the first ACTIVE reservation */
		// break;
	}
}
228
/* Replenish every depleted reservation whose replenishment time has
 * arrived.  The depleted list is sorted by increasing replenishment time,
 * so the walk stops at the first future entry.  Afterwards, program a
 * scheduler update at the (new) earliest pending replenishment, if any. */
static void sup_replenish_budgets(struct sup_reservation_environment* sup_env)
{
	struct list_head *pos, *next;
	struct reservation *res;

	list_for_each_safe(pos, next, &sup_env->depleted_reservations) {
		res = list_entry(pos, struct reservation, list);
		if (res->next_replenishment <= sup_env->env.current_time) {
			/* NOTE(review): replenish() presumably removes res from this
			 * list via the change_state callback — the _safe iterator
			 * tolerates that; confirm against ops implementations. */
			res->ops->replenish(res);
		} else {
			/* list is ordered by increasing depletion times */
			break;
		}
	}

	/* request a scheduler update at the next replenishment instant */
	res = list_first_entry_or_null(&sup_env->depleted_reservations,
		struct reservation, list);
	if (res)
		sup_scheduler_update_at(sup_env, res->next_replenishment);
}
251
/* Advance the uniprocessor environment's notion of time to 'now' and
 * process the consequences: drain budgets for the elapsed interval and
 * replenish reservations that became due. */
void sup_update_time(
	struct sup_reservation_environment* sup_env,
	lt_t now)
{
	lt_t delta;

	/* If the time didn't advance, there is nothing to do.
	 * This check makes it safe to call sup_update_time() potentially
	 * multiple times (e.g., via different code paths). */
	if (unlikely(now <= sup_env->env.current_time))
		return;

	delta = now - sup_env->env.current_time;
	sup_env->env.current_time = now;

	/* the previously programmed update instant is now in the past =>
	 * clear it; charging/replenishing below will set a fresh one */
	if (sup_env->next_scheduler_update <= sup_env->env.current_time)
		sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;

	/* deplete budgets by passage of time */
	sup_charge_budget(sup_env, delta);

	/* check if any budgets were replenished */
	sup_replenish_budgets(sup_env);
}
280
/* Pick the next task to run: walk the active queue (sorted by priority)
 * and return the first task offered by an ACTIVE reservation's dispatch
 * hook.  Also programs the scheduler to run again when the chosen
 * reservation's client time slice (if non-zero) or remaining budget
 * expires.  Returns NULL when nothing is runnable. */
struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env)
{
	struct reservation *res, *next;
	struct task_struct *tsk = NULL;
	lt_t time_slice;

	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
		if (res->state == RESERVATION_ACTIVE) {
			/* dispatch_client() fills in time_slice; 0 appears to mean
			 * "no client-imposed limit" (cf. default_dispatch_client) */
			tsk = res->ops->dispatch_client(res, &time_slice);
			if (likely(tsk)) {
				if (time_slice)
					sup_scheduler_update_after(sup_env, time_slice);
				sup_scheduler_update_after(sup_env, res->cur_budget);
				return tsk;
			}
		}
	}

	return NULL;
}
301
/* State-change callback for uniprocessor environments: move res from its
 * current queue to the one matching new_state, and force an immediate
 * reschedule if an ACTIVE reservation was lost (unless the scheduler is
 * already about to run, per will_schedule). */
static void sup_res_change_state(
	struct reservation_environment* env,
	struct reservation *res,
	reservation_state_t new_state)
{
	struct sup_reservation_environment* sup_env;

	sup_env = container_of(env, struct sup_reservation_environment, env);

	TRACE("reservation R%d state %d->%d at %llu\n",
		res->id, res->state, new_state, env->current_time);

	list_del(&res->list);
	/* check if we need to reschedule because we lost an active reservation */
	if (res->state == RESERVATION_ACTIVE && !sup_env->will_schedule)
		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
	res->state = new_state;
	sup_queue_reservation(sup_env, res);
}
321
322void sup_init(struct sup_reservation_environment* sup_env)
323{
324 memset(sup_env, sizeof(*sup_env), 0);
325
326 INIT_LIST_HEAD(&sup_env->active_reservations);
327 INIT_LIST_HEAD(&sup_env->depleted_reservations);
328 INIT_LIST_HEAD(&sup_env->inactive_reservations);
329
330 sup_env->env.change_state = sup_res_change_state;
331
332 sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
333}
334
335struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
336 unsigned int id)
337{
338 struct reservation *res;
339
340 list_for_each_entry(res, &gmp_env->active_reservations, list) {
341 if (res->id == id)
342 return res;
343 }
344 list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
345 if (res->id == id)
346 return res;
347 }
348 list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
349 if (res->id == id)
350 return res;
351 }
352
353 return NULL;
354}
355
356
357struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
358 unsigned int id)
359{
360 struct next_timer_event *event;
361
362 list_for_each_entry(event, &gmp_env->next_events, list) {
363 if (event->id == id)
364 return event;
365 }
366
367 return NULL;
368}
369
370
371struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
372 lt_t when)
373{
374 struct next_timer_event *event;
375
376 list_for_each_entry(event, &gmp_env->next_events, list) {
377 if (event->next_update == when)
378 return event;
379 }
380
381 return NULL;
382}
383
384#define TIMER_RESOLUTION 100000L
385
/* Insert or update a timer event (id, type) to fire at time 'when'.
 * The event list is kept sorted by increasing firing time.  If an event
 * with the same id exists and currently fires later than 'when', it is
 * unlinked and re-inserted at the earlier time; if the existing event has
 * a different type, a fresh event is allocated instead.
 *
 * NOTE(review): when an event with the same id but a different type
 * already fires no later than 'when', a second event is allocated without
 * removing the first, so two events share that id — confirm intended. */
static void gmp_add_event(
	struct gmp_reservation_environment* gmp_env,
	lt_t when, unsigned int id, event_type_t type)
{
	struct next_timer_event *nevent, *queued;
	struct list_head *pos;
	int found = 0, update = 0;

	nevent = gmp_find_event_by_id(gmp_env, id);

	if (nevent)
		TRACE("EVENT R%d update prev = %llu, new = %llu\n", nevent->id, nevent->next_update, when);

	/* existing event would fire too late: unlink it for re-insertion */
	if (nevent && nevent->next_update > when) {
		list_del(&nevent->list);
		update = 1;

	}

	if (!nevent || nevent->type != type || update == 1) {
		/* reuse the unlinked event when updating; otherwise allocate.
		 * GFP_ATOMIC: callers are presumably in atomic/scheduling
		 * context — TODO confirm. */
		if (update == 0)
			nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
		BUG_ON(!nevent);
		nevent->next_update = when;
		nevent->id = id;
		nevent->type = type;
		nevent->timer_armed_on = NO_CPU;

		/* sorted insert: place before the first later-firing event */
		list_for_each(pos, &gmp_env->next_events) {
			queued = list_entry(pos, struct next_timer_event, list);
			if (queued->next_update > nevent->next_update) {
				list_add(&nevent->list, pos->prev);
				found = 1;
				TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
				break;
			}
		}

		if (!found) {
			list_add_tail(&nevent->list, &gmp_env->next_events);
			TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
		}
	} else {
		/* matching event already queued at an acceptable time: nothing to do */
;
	}

	TRACE("======START PRINTING EVENT LIST======\n");
	gmp_print_events(gmp_env, litmus_clock());
	TRACE("======FINISH PRINTING EVENT LIST======\n");
}
440
/* Relative convenience wrapper: schedule event (id, type) to fire
 * 'timeout' nanoseconds after the environment's current time. */
void gmp_add_event_after(
	struct gmp_reservation_environment* gmp_env, lt_t timeout, unsigned int id, event_type_t type)
{
	gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
}
447
448static void gmp_queue_depleted(
449 struct gmp_reservation_environment* gmp_env,
450 struct reservation *res)
451{
452 struct list_head *pos;
453 struct reservation *queued;
454 int found = 0;
455
456//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
457
458 list_for_each(pos, &gmp_env->depleted_reservations) {
459 queued = list_entry(pos, struct reservation, list);
460 if (queued && (queued->next_replenishment > res->next_replenishment)) {
461//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
462 list_add(&res->list, pos->prev);
463 found = 1;
464 break;
465 }
466 }
467
468 if (!found)
469 list_add_tail(&res->list, &gmp_env->depleted_reservations);
470
471 TRACE("R%d queued to depleted_list\n", res->id);
472//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
473 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
474}
475
/* Insert res into the global active queue (sorted by increasing priority
 * value) and bump schedule_now if res may preempt, i.e., if every
 * higher-or-equal-priority reservation ahead of it is already scheduled
 * on some CPU. */
static void gmp_queue_active(
	struct gmp_reservation_environment* gmp_env,
	struct reservation *res)
{
	struct list_head *pos;
	struct reservation *queued;
	int check_preempt = 1, found = 0;

	list_for_each(pos, &gmp_env->active_reservations) {
		queued = list_entry(pos, struct reservation, list);
		if (queued->priority > res->priority) {
			list_add(&res->list, pos->prev);
			found = 1;
			break;
		} else if (queued->scheduled_on == NO_CPU)
			/* an unscheduled reservation already precedes res, so res
			 * cannot be the one triggering a preemption */
			check_preempt = 0;
	}

	if (!found)
		list_add_tail(&res->list, &gmp_env->active_reservations);

	/* check for possible preemption */
	if (res->state == RESERVATION_ACTIVE && check_preempt)
		gmp_env->schedule_now++;

#if BUDGET_ENFORCEMENT_AT_C
	gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
#endif
	/* NOTE(review): event_added is set even when BUDGET_ENFORCEMENT_AT_C
	 * is 0 and no EVENT_DRAIN was queued — confirm this is intended. */
	res->event_added = 1;
}
506
507static void gmp_queue_reservation(
508 struct gmp_reservation_environment* gmp_env,
509 struct reservation *res)
510{
511
512//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n",__FUNCTION__,__LINE__, gmp_env, res->id, res->state);
513 switch (res->state) {
514 case RESERVATION_INACTIVE:
515 list_add(&res->list, &gmp_env->inactive_reservations);
516 break;
517
518 case RESERVATION_DEPLETED:
519 gmp_queue_depleted(gmp_env, res);
520 break;
521
522 case RESERVATION_ACTIVE_IDLE:
523 case RESERVATION_ACTIVE:
524 gmp_queue_active(gmp_env, res);
525 break;
526 }
527}
528
/* Register a newly created reservation with a global environment:
 * point it at the environment and enqueue it according to its state. */
void gmp_add_new_reservation(
	struct gmp_reservation_environment* gmp_env,
	struct reservation* new_res)
{
	new_res->env = &gmp_env->env;
	gmp_queue_reservation(gmp_env, new_res);
}
536
#if BUDGET_ENFORCEMENT_AT_C
/* Charge 'delta' nanoseconds against all reservations on the global
 * active queue.  ACTIVE reservations are drained only while actually
 * scheduled on a CPU and not blocked by a ghost job; ACTIVE_IDLE
 * reservations are always drained.  Each drained reservation that remains
 * ACTIVE/ACTIVE_IDLE gets an EVENT_DRAIN timer at its budget-exhaustion
 * time. */
static void gmp_charge_budget(
	struct gmp_reservation_environment* gmp_env,
	lt_t delta)
{
	struct list_head *pos, *next;
	struct reservation *res;

	list_for_each_safe(pos, next, &gmp_env->active_reservations) {
		int drained = 0;
		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
		res = list_entry(pos, struct reservation, list);
		if (res->state == RESERVATION_ACTIVE) {
			TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
			if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) {
				TRACE("DRAIN !!\n");
				drained = 1;
				res->ops->drain_budget(res, delta);
			} else {
				TRACE("NO DRAIN (not scheduled)!!\n");
			}
		} else {
			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
			/* anything other than ACTIVE_IDLE on this queue is a bug */
			if (res->state != RESERVATION_ACTIVE_IDLE)
				TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n");
			TRACE("gmp_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
			//if (res->is_ghost != NO_CPU) {
			TRACE("DRAIN !!\n");
			drained = 1;
			res->ops->drain_budget(res, delta);
			//}
		}
		if ((res->state == RESERVATION_ACTIVE ||
			res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
		{
			/* make sure scheduler is invoked when this reservation expires
			 * its remaining budget */
			TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
			gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
			res->event_added = 1;
		}
		//if (encountered_active == 2)
		/* stop at the first ACTIVE reservation */
		// break;
	}
}
#else

/* Budget enforcement at level C is compiled out: charging is a no-op. */
static void gmp_charge_budget(
	struct gmp_reservation_environment* gmp_env,
	lt_t delta)
{
	return;
}

#endif
594
/* Replenish every depleted reservation in the global environment whose
 * replenishment time has arrived.  The depleted list is sorted by
 * increasing replenishment time, so the walk stops at the first entry
 * still in the future. */
static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
{
	struct list_head *pos, *next;
	struct reservation *res;

	list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
		res = list_entry(pos, struct reservation, list);
		if (res->next_replenishment <= gmp_env->env.current_time) {
			/* NOTE(review): replenish() presumably removes res from this
			 * list via change_state — _safe iterator tolerates that. */
			res->ops->replenish(res);
			if (res->is_ghost != NO_CPU) {
				TRACE("R%d replenished! scheduled_on=%d\n", res->id, res->scheduled_on);
			}
		} else {
			/* list is ordered by increasing depletion times */
			break;
		}
	}
}
614
615#define EPSILON 50
616
617/* return schedule_now */
618int gmp_update_time(
619 struct gmp_reservation_environment* gmp_env,
620 lt_t now)
621{
622 struct next_timer_event *event, *next;
623 lt_t delta, ret;
624
625 /* If the time didn't advance, there is nothing to do.
626 * This check makes it safe to call sup_advance_time() potentially
627 * multiple times (e.g., via different code paths. */
628 //TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
629 if (unlikely(now <= gmp_env->env.current_time + EPSILON))
630 return 0;
631
632 delta = now - gmp_env->env.current_time;
633 gmp_env->env.current_time = now;
634
635
636 //gmp_print_events(gmp_env, now);
637 /* deplete budgets by passage of time */
638 //TRACE("CHARGE###\n");
639 gmp_charge_budget(gmp_env, delta);
640
641 /* check if any budgets where replenished */
642 //TRACE("REPLENISH###\n");
643 gmp_replenish_budgets(gmp_env);
644
645
646 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
647 if (event->next_update < now) {
648 list_del(&event->list);
649 //TRACE("EVENT at %llu IS DELETED\n", event->next_update);
650 kfree(event);
651 } else {
652 break;
653 }
654 }
655
656 //gmp_print_events(gmp_env, litmus_clock());
657
658 ret = min(gmp_env->schedule_now, NR_CPUS);
659 gmp_env->schedule_now = 0;
660
661 return ret;
662}
663
664void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
665{
666 struct next_timer_event *event, *next;
667
668 TRACE("GLOBAL EVENTS now=%llu\n", now);
669 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
670 TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
671 }
672}
673
/* State-change callback for global environments: move res from its
 * current queue to the one matching new_state, and bump schedule_now if
 * an ACTIVE reservation was lost. */
static void gmp_res_change_state(
	struct reservation_environment* env,
	struct reservation *res,
	reservation_state_t new_state)
{
	struct gmp_reservation_environment* gmp_env;

	gmp_env = container_of(env, struct gmp_reservation_environment, env);

	TRACE("GMP reservation R%d state %d->%d at %llu\n",
		res->id, res->state, new_state, env->current_time);

	list_del(&res->list);
	/* check if we need to reschedule because we lost an active reservation */
	if (res->state == RESERVATION_ACTIVE)
		gmp_env->schedule_now++;
	res->state = new_state;
	gmp_queue_reservation(gmp_env, res);
}
693
694void gmp_init(struct gmp_reservation_environment* gmp_env)
695{
696 memset(gmp_env, sizeof(*gmp_env), 0);
697
698 INIT_LIST_HEAD(&gmp_env->active_reservations);
699 INIT_LIST_HEAD(&gmp_env->depleted_reservations);
700 INIT_LIST_HEAD(&gmp_env->inactive_reservations);
701 INIT_LIST_HEAD(&gmp_env->next_events);
702
703 gmp_env->env.change_state = gmp_res_change_state;
704
705 gmp_env->schedule_now = 0;
706 gmp_env->will_schedule = false;
707
708 raw_spin_lock_init(&gmp_env->lock);
709}