###############################################################################
# Description
###############################################################################

# C-EDF Test
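#
# Scans a unit_trace event stream and checks it against the C-EDF
# (Clustered EDF) policy: within each cluster of csize CPUs, the csize
# eligible jobs with the earliest deadlines should be running.
# Violations are reported as priority-inversion start/end error records,
# for ordinary tasks as well as for tasklets and work items executed by
# klitirqd threads.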

###############################################################################
# Imports
###############################################################################
import time
import copy
import sys

###############################################################################
# Public Functions
###############################################################################

def cedf_test(stream,csize,number):
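    """Generator: consumes a unit_trace event stream and re-yields its
    records, adding 'error' records that mark priority-inversion starts
    and ends under C-EDF.

    Arguments:
      stream -- iterable of unit_trace records
      csize  -- cluster size, in CPUs
      number -- maximum number of event records to process
                (a negative value means no limit)

    A minimal usage sketch (assuming the stream comes from the
    unit_trace trace reader front end):

        for rec in cedf_test(stream, csize=2, number=-1):
            if rec.record_type == 'error':
                print rec.type_name
    """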

    m = 12        # number of CPUs; hard-coded to work around a bug in reading the num_cpus meta record (see below)
    # System model
    on_cpu          = []  # Tasks on CPU
    off_cpu         = []  # Tasks not on CPU
    tasklet_off_cpu = []  # Tasklets not on CPU
    tasklet_on_cpu  = []  # Tasklets on CPU
    work_off_cpu    = []  # Work items not on CPU
    work_on_cpu     = []  # Work items on CPU
    
    cluster_map = []
    
    #for record in stream:
    #    if record.record_type == "meta" and record.type_name == "num_cpus":
    #        m = record.num_cpus
    #        break
    job_2s_released = [] # list of tasks which have released their job 2s
        
    # Time of the last record we saw. Only run the C-EDF test when the time
    # is updated.
    last_time = None
    
    # Progress-reporting state
    start_time = 0
    count = 0

    # First event for the latest timestamp. This is used to match up
    # inversion starts and ends with the first event from the previous
    # timestamp, which is the first event that could have triggered
    # the inversion start or end.
    first_event_this_timestamp = 0

    for record in stream:
        if record.record_type != "event":
            #yield record # pass non-event records through for analysis if needed
            continue
            
        # All records with job < 2 are garbage
        if record.job < 2:
            continue
            
        # All records with pid < 1 are garbage
        if record.pid < 1:
            continue
        
        # All records with a CPU outside [0, m-1] are garbage
        if record.cpu > m - 1 or record.cpu < 0:
            continue
        # All records with when == 0 are garbage
        if record.when == 0:
            continue
        # Stop once `number` records have been processed
        # (a negative `number` means no limit)
        if number > 0:
            number -= 1
        elif number == 0:
            break
            
        #yield record
        #continue  
        
        # Bookkeeping iff the timestamp has moved forward.
        # Check for inversion starts and ends and yield them.
        # (It is common to have records with simultaneous timestamps,
        # so we only check when the time has moved forward)
        # Also, need to update the first_event_this_timestamp variable
        if last_time is not None and last_time != record.when:  
            
            # check PI for tasklet 
            errors = _cedf_check_irq(
                off_cpu,on_cpu,
                tasklet_on_cpu,work_on_cpu,
                tasklet_off_cpu,tasklet_on_cpu,
                last_time,csize,
                first_event_this_timestamp, "Tasklet")
            for error in errors:
                yield error
            # check Simultaneous execution of tasklet and owner
            #errors = _cedf_check_tasklet_simultaneous(off_cpu,on_cpu,
            #    tasklet_off_cpu,tasklet_on_cpu,last_time,csize,
            #    first_event_this_timestamp)
            #for error in errors:
            #    yield error
            
            # check PI for work item    
            errors = _cedf_check_irq(
                off_cpu,on_cpu,
                tasklet_on_cpu,work_on_cpu,
                work_off_cpu,work_on_cpu,
                last_time,csize,
                first_event_this_timestamp, "Work_Item")            
            for error in errors:
                yield error
            
            # check Simultaneous execution of work item and owner
            #errors = _cedf_check_work_simultaneous(off_cpu,on_cpu,
            #    work_off_cpu,work_on_cpu,last_time,csize,
            #    first_event_this_timestamp)            
            #for error in errors:
            #    yield error
            
            # check PI for task
            for c in range(0,int(m/csize)):
                errors = _cedf_check(c,
                    off_cpu,on_cpu,
                    tasklet_on_cpu,work_on_cpu,
                    last_time,csize,
                    first_event_this_timestamp)
                for error in errors:
                    yield error
            
            first_event_this_timestamp = record.id

        # Add a newly-released Job to the off_cpu queue
        if record.type_name == 'release':
            # Some records with job == 2 are garbage
            if record.job==2:
                # There is a duplicate release of every job 2
                # This will throw away the second one
                if record.pid in job_2s_released:
                    continue
                else:
                    job_2s_released.append(record.pid)
            
            off_cpu.append(Job(record))         
            # These fields are not set by Job.__init__, so fill them in here
            pos = _find_job(record,off_cpu)
            off_cpu[pos].cluster = _get_cluster(record, cluster_map)
            off_cpu[pos].deadline = record.deadline
            #off_cpu[pos].inh_deadline = record.deadline
                
        # Move a Job from the off_cpu queue to on_cpu
        elif record.type_name == 'switch_to':
            pos = _find_job(record,off_cpu)
            if pos is None:
                msg = "Event %d tried to switch to a job %d that was not on the"
                msg += " off_cpu queue\n"
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                yield record
                #exit()
                continue
            job = off_cpu[pos]
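            # Clusters are csize consecutive CPUs: cluster id = cpu // csize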
            job.cluster = int(record.cpu/csize) 
            _insert_cluster_map(job,cluster_map)            
            del off_cpu[pos]
            on_cpu.append(job)

        # Mark a Job as completed.
        # The only time a Job completes when it is not on a
        # CPU is when it is the last job of the task.
        elif record.type_name == 'completion':
            pos = _find_job(record,on_cpu)
            if pos is not None:
                on_cpu[pos].is_complete = True
            else:
                pos = _find_job(record,off_cpu)  
                if pos is None:
                    msg = ("Event %d tried to complete a job %d" +
                        " that was not running\n")
                    msg = msg % (record.id,record.pid)
                    print msg
                    #sys.stderr.write(msg)
                    #exit()
                    continue
                    
                if off_cpu[pos].inversion_start is not None:
                    off_cpu[pos].inversion_end = record.when
                    yield Error(off_cpu[pos], off_cpu, on_cpu,
                        first_event_this_timestamp,"Task")
                    off_cpu[pos].inversion_start = None
                    off_cpu[pos].inversion_end = None
                del off_cpu[pos]
        # A job is switched away from a CPU. If it has
        # been marked as complete, remove it from the model.
        elif record.type_name == 'switch_away':             
            pos = _find_job(record,on_cpu)
            if pos is None:
                # By default, the switch_away for a job (after it has completed)
                # is marked as being for job+1, which has never been switched to.
                # We can correct this if we note which jobs really
                # have been switched to.
                record.job -= 1
                pos = _find_job(record,on_cpu)
                if pos is None:
                    msg = ("Event %d tried to switch away a job %d" +
                        " that was not running\n")
                    msg = msg % (record.id, record.pid)
                    print msg
                    #sys.stderr.write(msg)
                    #exit()
                    yield record
                    continue
                    
            job = on_cpu[pos]
            del on_cpu[pos]
            if not job.is_complete:
                off_cpu.append(job)

        # A job has been blocked.
        elif record.type_name == 'block':
            pos = _find_job(record,on_cpu)
            # What if the job is blocked AFTER being switched away?
            # This is a bug in some versions of LITMUS.
            if pos is None:
                pos = _find_job(record,off_cpu)
                if pos is None:
                    msg = ("Event %d tried to block a job %d" +
                        " that was not running\n")
                    msg = msg % (record.id,record.pid)
                    print msg
                    #sys.stderr.write(msg)
                    #exit()
                    continue
                off_cpu[pos].is_blocked = True
            else:
                on_cpu[pos].is_blocked = True


        # A job is resumed
        elif record.type_name == 'resume':
            # Job 2 has a resume that is garbage
            if record.job==2:
                continue
            pos = _find_job(record,off_cpu)
            if pos is None:
                msg = ("Event %d tried to resume a job %d" +
                    " that was not running\n")
                msg = msg % (record.id,record.pid)
                print msg
                #sys.stderr.write(msg)
                #exit()
                continue
            off_cpu[pos].is_blocked = False

        # Add a newly-released Tasklet to the tasklet_off_cpu queue
        elif record.type_name == 'tasklet_release':
            # Only track tasklets whose owner task is known to the model
            if _is_owner(record,off_cpu,on_cpu) is True:
                pos = _find_job(record,off_cpu)               
                if pos is not None:
                    job = off_cpu[pos]
                else: # otherwise the owner must already be on a CPU
                    pos = _find_job(record,on_cpu)
                    job = on_cpu[pos]  
                     
                tasklet_off_cpu.append(Job(record))
                # These fields are not set by Job.__init__, so fill them in here
                pos = _find_job(record,tasklet_off_cpu)
                tasklet_off_cpu[pos].cluster = job.cluster
                tasklet_off_cpu[pos].deadline = job.deadline              
                tasklet_off_cpu[pos].inversion_start = None
                tasklet_off_cpu[pos].inversion_end = None
                tasklet_off_cpu[pos].inversion_start_id = None
                tasklet_off_cpu[pos].inversion_start_triggering_event_id = None  
                
		
        # Add a newly-released Work item to the work_off_cpu queue
        elif record.type_name == 'work_release':
            # Only track work items whose owner task is known to the model
            if _is_owner(record,off_cpu,on_cpu) is True:
                pos = _find_job(record,off_cpu)               
                if pos is not None:
                    job = off_cpu[pos]
                else: # otherwise the owner must already be on a CPU
                    pos = _find_job(record,on_cpu)
                    job = on_cpu[pos]                   
                work_off_cpu.append(Job(record))
                # These fields are not set by Job.__init__, so fill them in here
                pos = _find_job(record,work_off_cpu)
                work_off_cpu[pos].cluster = job.cluster
                work_off_cpu[pos].deadline = job.deadline              
                work_off_cpu[pos].inversion_start = None
                work_off_cpu[pos].inversion_end = None
                work_off_cpu[pos].inversion_start_id = None
                work_off_cpu[pos].inversion_start_triggering_event_id = None
		
        # Move a Tasklet from the tasklet_off_cpu queue to tasklet_on_cpu
        elif record.type_name == 'tasklet_begin':
            pos = _find_job(record,tasklet_off_cpu)
            if pos is None:
                msg = "Event %d tried to begin to a tasklet for job %d that was not on the"
                msg += " tasklet_off_cpu queue\n"
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                #exit() #Continue see if anything wrong
                continue
            job = tasklet_off_cpu[pos]
            job.exe_pid = record.exe_pid
            del tasklet_off_cpu[pos]
            tasklet_on_cpu.append(job)

        
        # Register a klitirqd thread to a Work item and move it to work_on_cpu
        elif record.type_name == 'work_begin':
            pos = _find_job(record,work_off_cpu)
            if pos is None:
                msg = "Event %d tried to begin to a work item for job %d that was not on the"
                msg += " work_off_cpu queue\n"
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                #exit() #Continue see if anything wrong
                continue
            job = work_off_cpu[pos]
            job.exe_pid = record.exe_pid
            del work_off_cpu[pos]
            work_on_cpu.append(job)
       
        # A Tasklet ends on a CPU.
        elif record.type_name == 'tasklet_end':
            pos = _find_job(record,tasklet_on_cpu)
            if pos is None:
                msg = ("Event %d tried to end a tasklet for job %d" +
                       " that was not running\n")
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                pos = _find_job(record,tasklet_off_cpu)
                if pos is not None:
                    del tasklet_off_cpu[pos]
                #exit() #Continue see if anything wrong
            else:    
                if tasklet_on_cpu[pos].inversion_start is not None:
                    tasklet_on_cpu[pos].inversion_end = record.when
                    yield Error(tasklet_on_cpu[pos], off_cpu, on_cpu,
                        first_event_this_timestamp,"Tasklet")
                    tasklet_on_cpu[pos].inversion_start = None
                    tasklet_on_cpu[pos].inversion_end = None
            
                del tasklet_on_cpu[pos]
            
                  
        # A Work item ends on a CPU.
        elif record.type_name == 'work_end':
            pos = _find_job(record,work_on_cpu)
            if pos is None:
                msg = ("Event %d tried to end a work for job %d" +
                       " that was not running\n")
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                pos = _find_job(record,work_off_cpu)
                if pos is not None:
                    del work_off_cpu[pos]
                #exit() #Continue see if anything wrong
            else:    
                if work_on_cpu[pos].inversion_start is not None:
                    work_on_cpu[pos].inversion_end = record.when
                    yield Error(work_on_cpu[pos], off_cpu, on_cpu,
                        first_event_this_timestamp,"Work_Item")
                    work_on_cpu[pos].inversion_start = None
                    work_on_cpu[pos].inversion_end = None
                del work_on_cpu[pos]
            
        # a Task has Priority inheritance 
        #elif record.type_name == 'eff_prio_change':
        #    inh_pos = _find_inh_job(record,off_cpu)
        #    if inh_pos is None:
        #        inh_pos = _find_inh_job(record,on_cpu)
        #        if inh_pos is not None:
        #            inh_job = on_cpu[inh_pos]
        #    else:
        #        inh_job = off_cpu[inh_pos]
        #    
        #    pos = _find_job(record,on_cpu)
        #    if pos is None:
        #        pos = _find_job(record,off_cpu)
        #        if pos is None:
        #            msg = ("Event %d tried to change a jobs priority " +
        #                " that cannot found\n")
        #            msg = msg % (record.id)
        #            sys.stderr.write(msg)
        #            #exit() #Continue see if anything wrong
        #            continue
        #        if inh_pos is not None:
        #            off_cpu[pos].inh_deadline = inh_job.deadline
        #        else:
        #            off_cpu[pos].inh_deadline = off_cpu[pos].deadline
        #    else:
        #        if inh_pos is not None:
        #            on_cpu[pos].inh_deadline = inh_job.deadline
        #        else:
        #            on_cpu[pos].inh_deadline = on_cpu[pos].deadline
        else:
            continue # unrecognized event type
        last_time = record.when
        
        if (record.id-count) > 25000:
            sys.stderr.write(("Parsed event %d:\t") % (count))
            #sys.stderr.write(("(%d,%d,%d,%d,%d,%d)\n")
            #    % (len(on_cpu),len(off_cpu),len(tasklet_on_cpu),len(tasklet_off_cpu),len(work_on_cpu),len(work_off_cpu)))
            sys.stderr.write(("Time elapsed for the last 25000 events: %ds\n\n") % (time.time() - start_time))
            start_time = time.time()
            count = record.id
            
        yield record
        
        
###############################################################################
# Private Functions
###############################################################################


# Internal representation of a Job
class Job(object):
    def __init__(self, record):
        self.pid = record.pid
        self.job = record.job
        self.cpu = record.cpu  # for target cpu  (-1: not assigned yet)
        self.cluster = -1  # for target cluster  (-1: not assigned yet)
        self.deadline = None
        #self.inh_deadline = None
        self.exe_pid = -1 # pid of the klitirqd thread handling this item (-1: not assigned yet)
        self.is_complete = False
        self.is_blocked = False
        self.inversion_start = None
        self.inversion_end = None
        self.inversion_start_id = None
        self.inversion_start_triggering_event_id = None
        #self.simultaneous_start = None
        #self.simultaneous_end = None
        #self.simultaneous_start_id = None
        #self.simultaneous_start_triggering_event_id = None

    def __str__(self):
        # deadline may still be None (e.g. for irq items); use %s to be safe
        return "(%d.%d:%s)" % (self.pid,self.job,self.deadline)

# C-EDF errors: the start or end of an inversion
class Error(object):
    id = 0
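    # Class-level counter shared by all Error instances; each new Error
    # gets the next id.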
    def __init__(self, job, off_cpu, on_cpu,first_event_this_timestamp,pi_type):
        Error.id += 1
        self.error_type = "inversion"
        self.id = Error.id
        self.job = copy.copy(job)
        self.off_cpu = copy.copy(off_cpu)
        self.on_cpu = copy.copy(on_cpu)
        self.record_type = 'error'
        self.triggering_event_id = first_event_this_timestamp
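        # If the job has no inversion_end yet, this Error marks an
        # inversion start; otherwise it marks the end and links back to
        # the matching start record.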
        if job.inversion_end is None:
            self.type_name = pi_type+'_inversion_start'
            job.inversion_start_id = self.id
            job.inversion_start_triggering_event_id = self.triggering_event_id
        else:
            self.type_name = pi_type+'_inversion_end'
            self.inversion_start_id = job.inversion_start_id
            self.inversion_start_triggering_event_id = job.inversion_start_triggering_event_id


# C-EDF errors: the start or end of a simultaneous execution of owner and tasklet(work_item)
#class Error_simultaneous(object):
#    id = 0
#    def __init__(self, job, c, first_event_this_timestamp,irq_type):
#        Error_simultaneous.id += 1
#        self.error_type = "simultaneous"
#        self.id = Error_simultaneous.id
#        self.job = copy.copy(job)
#        self.cluster = c
#        self.record_type = 'error'
#        self.triggering_event_id = first_event_this_timestamp
#        if job.simultaneous_end is None:
#            self.type_name = irq_type+'_simultaneous_start'
#            job.simultaneous_start_id = self.id
#            job.simultaneous_start_triggering_event_id = self.triggering_event_id
#        else:
#            self.type_name = irq_type+'_simultaneous_end'
#            self.simultaneous_start_id = job.simultaneous_start_id
#            self.simultaneous_start_triggering_event_id = job.simultaneous_start_triggering_event_id

# Record the job's cluster in cluster_map, warning if it has changed
def _insert_cluster_map(job, cluster_map):
    for x in cluster_map:
        if x.pid == job.pid:
            if x.cluster != job.cluster:
                sys.stderr.write( "Cluster for Job %d has been changed" % (job.pid))
                x.cluster = job.cluster
            return
    cluster_map.append(job)
    
# Look up the cluster previously recorded for this pid
def _get_cluster(record, cluster_map):
    for job in cluster_map:
        if job.pid == record.pid:
            return job.cluster
    return -1  #(-1: not assigned yet)
    
# Returns the position of a Job in a list, or None
def _find_job(record,queue):
    for i in range(0,len(queue)):
        if queue[i].pid == record.pid and queue[i].job == record.job:
            return i
    return None

# Returns True if job is a klitirqd thread currently executing a tasklet or work item
def _is_klit_thread(job, tasklet_on_cpu, work_on_cpu):
    for x in tasklet_on_cpu:
        if x.exe_pid is not None and x.exe_pid == job.pid:
            return True
    for x in work_on_cpu:
        if x.exe_pid is not None and x.exe_pid == job.pid:
            return True 
    return False

# Returns the position of the klitirqd thread executing a tasklet or
# work item in a list, or None
def _find_irq_carrier(job,queue):
    for i in range(0,len(queue)):
        if queue[i].pid == job.exe_pid:
            return i
    return None

# Returns the position of an inherited Job in a list, or None
#def _find_inh_job(record,list):
#    for i in range(0,len(list)):
#        if list[i].pid == record.inh_pid:
#            return i
#    return None

# Returns True if the owner task is in off_cpu or on_cpu
def _is_owner(record,off_cpu,on_cpu):
    pos = _find_job(record,off_cpu)
    if pos is not None:
        return True
    pos = _find_job(record,on_cpu)
    if pos is not None:
        return True
    else:
        return False 

# Return records for any inversion_starts and inversion_ends
def _cedf_check(c,
    off_cpu,on_cpu,
    tasklet_on_cpu, work_on_cpu,
    when,csize,
    first_event_this_timestamp):
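    # Within cluster c, the csize eligible jobs with the earliest
    # deadlines should be the ones running; if one of them is not
    # running, it is suffering a priority inversion.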

    # List of error records to be returned
    errors = []

    # List of all jobs (excluding klitirqd threads) that are contending
    # for the CPU (neither complete nor blocked)
    all = []
    for x in on_cpu:
        if x.cluster == c and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
            if x.is_complete is not True and x.is_blocked is not True: 
                all.append(x)
    for x in off_cpu:
        if x.cluster == c and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
            if x.is_blocked is not True:
                all.append(x)

    # Sort by on_cpu and then by deadline. sort() is guaranteed to be stable.
    # Thus, this gives us jobs ordered by deadline with preference to those
    # actually running.
    all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
    all.sort(key=lambda x: x.deadline)
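    # After sorting, all[:csize] holds the jobs that should be running
    # in this cluster under C-EDF.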

    # Check those that actually should be running, to look for priority
    # inversions
    for x in range(0,min(csize,len(all))):
        job = all[x]

        # It's not running and an inversion_start has not been recorded
        if job not in on_cpu and job.inversion_start is None:
            job.inversion_start = when
            errors.append(Error(job, off_cpu, on_cpu,
                first_event_this_timestamp,"Task"))

        # It is running and an inversion_start exists (i.e. it is still
        # marked as being inverted)
        elif job in on_cpu and job.inversion_start is not None:
            job.inversion_end = when
            errors.append(Error(job, off_cpu, on_cpu,
                first_event_this_timestamp,"Task"))
            job.inversion_start = None
            job.inversion_end = None

    # Check those that actually should not be running, to record the end of any
    # priority inversions
    for x in range(csize,len(all)):
        job = all[x]
        if job not in on_cpu and job.inversion_start is not None:
            job.inversion_end = when
            errors.append(Error(job, off_cpu, on_cpu,
                first_event_this_timestamp,"Task"))
            job.inversion_start = None
            job.inversion_end = None
            
    # Close out inversions for tasks that are blocked or complete, whether on or off CPU
    for x in on_cpu:
        if x.cluster == c and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
            if x.is_blocked is True or x.is_complete is True:
                if x.inversion_start is not None:
                    x.inversion_end = when
                    errors.append(Error(x, off_cpu, on_cpu,
                        first_event_this_timestamp,"Task"))
                    x.inversion_start = None
                    x.inversion_end = None
    
    for x in off_cpu:
        if x.cluster == c and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
            if x.is_blocked is True:
                if x.inversion_start is not None:
                    x.inversion_end = when
                    errors.append(Error(x, off_cpu, on_cpu,
                        first_event_this_timestamp,"Task"))
                    x.inversion_start = None
                    x.inversion_end = None        
    return errors
    

# Return records for any inversion_starts and inversion_ends
def _cedf_check_irq(
    off_cpu,on_cpu,
    tasklet_on_cpu, work_on_cpu,
    irq_off_cpu,irq_on_cpu,
    when,csize,
    first_event_this_timestamp, irq_type):

    # List of error records to be returned
    errors = []
        
    # Examine each irq item in irq_off_cpu (no klitirqd thread assigned yet)
    for irq in irq_off_cpu:
        # List of all jobs (excluding klitirqd threads) contending for the CPU
        # (completed tasks are excluded, but blocked tasks still count)
        all = []
        for x in on_cpu:
            if x.cluster == irq.cluster and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
                if x.is_complete is not True:# and x.is_blocked is not True: 
                    all.append(x)
        for x in off_cpu:
            if x.cluster == irq.cluster and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
                #if x.is_blocked is not True:
                all.append(x)
        # Sort by on_cpu and then by deadline. sort() is guaranteed to be stable.
        # Thus, this gives us jobs ordered by deadline with preference to those
        # actually running.
        all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
        all.sort(key=lambda x: x.deadline)
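        # An irq item inherits its owner's priority: if the owner ranks
        # among the csize highest-priority jobs in its cluster but is not
        # running, the pending item is suffering an inversion.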
        
        pos = _find_job(irq,all)
        _on = _find_job(irq,on_cpu)
        # owner task has completed (unexpected)
        if pos is None and irq.inversion_start is not None:
            irq.inversion_end = when
            errors.append(Error(irq, off_cpu, on_cpu,
                first_event_this_timestamp,irq_type))
            irq.inversion_start = None
            irq.inversion_end = None
        elif pos is not None:
            # Check the owner task's priority:
            # the owner is among the csize highest-priority tasks in the cluster
            if pos in range(0,min(csize,len(all))):
                # owner task is not running
                if _on is None:
                    if irq.inversion_start is None:
                        irq.inversion_start = when
                        errors.append(Error(irq, off_cpu, on_cpu,
                            first_event_this_timestamp,irq_type))
                else:
                    # Unexpected situation: the owner should be blocked here
                    if irq.inversion_start is not None:     
                        irq.inversion_end = when
                        errors.append(Error(irq, off_cpu, on_cpu,
                            first_event_this_timestamp,irq_type))
                        irq.inversion_start = None
                        irq.inversion_end = None
            # the owner is not among the csize highest-priority tasks in the cluster
            elif pos in range(csize,len(all)):
                if irq.inversion_start is not None:
                    irq.inversion_end = when
                    errors.append(Error(irq, off_cpu, on_cpu,
                        first_event_this_timestamp,irq_type))
                    irq.inversion_start = None
                    irq.inversion_end = None

    # Items already handled by klitirqd (work_begin/tasklet_begin seen and
    # a klitirqd thread assigned)
    for irq in irq_on_cpu:
        # List of all jobs (excluding klitirqd threads) contending for the CPU
        # (completed tasks are excluded, but blocked tasks still count)
        all = []
        for x in on_cpu:
            if x.cluster == irq.cluster and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
                if x.is_complete is not True:# and x.is_blocked is not True: 
                    all.append(x)
        for x in off_cpu:
            if x.cluster == irq.cluster and _is_klit_thread(x, tasklet_on_cpu, work_on_cpu) is not True:
                #if x.is_blocked is not True:
                all.append(x)
        # Sort by on_cpu and then by deadline. sort() is guaranteed to be stable.
        # Thus, this gives us jobs ordered by deadline with preference to those
        # actually running.
        all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
        all.sort(key=lambda x: x.deadline)
        
        pos = _find_job(irq,all)
        _on = _find_job(irq,on_cpu)
        # owner task has completed (unexpected)
        if pos is None and irq.inversion_start is not None:
            irq.inversion_end = when
            errors.append(Error(irq, off_cpu, on_cpu,
                first_event_this_timestamp,irq_type))
            irq.inversion_start = None
            irq.inversion_end = None
        elif pos is not None:
            # Check the owner task's priority:
            # the owner is among the csize highest-priority tasks in the cluster
            if pos in range(0,min(csize,len(all))):
                # owner task is not running
                if _on is None:                        
                    klit_pos = _find_irq_carrier(irq,on_cpu)
                    # if the klitirqd task is running and is not blocked
                    if klit_pos is not None and on_cpu[klit_pos].is_blocked is False:
                        if irq.inversion_start is not None: 
                            irq.inversion_end = when
                            errors.append(Error(irq, off_cpu, on_cpu,
                                first_event_this_timestamp,irq_type))
                            irq.inversion_start = None
                            irq.inversion_end = None
                    #if the klitirqd task is not running or blocked
                    else:
                        if irq.inversion_start is None:
                            irq.inversion_start = when
                            errors.append(Error(irq, off_cpu, on_cpu,
                                first_event_this_timestamp,irq_type))                          
                else:
                    # Unexpected situation: the owner should be blocked here
                    if irq.inversion_start is not None:     
                        irq.inversion_end = when
                        errors.append(Error(irq, off_cpu, on_cpu,
                            first_event_this_timestamp,irq_type))
                        irq.inversion_start = None
                        irq.inversion_end = None
                        
            # the owner is not among the csize highest-priority tasks in the cluster
            elif pos in range(csize,len(all)):
                if irq.inversion_start is not None:
                    irq.inversion_end = when
                    errors.append(Error(irq, off_cpu, on_cpu,
                        first_event_this_timestamp,irq_type))
                    irq.inversion_start = None
                    irq.inversion_end = None                               
    return errors


# Return records for any inversion_starts and inversion_ends
#def _cedf_check_work_simultaneous(off_cpu,on_cpu,
#	work_off_cpu,work_on_cpu,
#    when,csize,first_event_this_timestamp):
#
#    # List of error records to be returned
#    errors = []
#        
#    #Look for all work is work_off_cpu (not running)
#    for work in work_off_cpu:
#        # find owner's cpu
#        owner_cpu = _get_cpu(work,off_cpu,on_cpu,"Work_Item")  
#        if owner_cpu is None:
#            work_off_cpu.remove(work)
#            continue  
#                
#        if work.simultaneous_start is not None:
#            work.simultaneous_end = when
#            errors.append(Error_simultaneous(tasklet, int(owner_cpu/csize), 
#                first_event_this_timestamp,"Work_Item"))
#            work.simultaneous_start = None
#            work.simultaneous_end = None  
#
#    # have klitirqd take care of
#    for work in work_on_cpu:
#        # find owner's cpu
#        owner_cpu = _get_cpu(work,off_cpu,on_cpu,"Work_Item")
#        if owner_cpu is None:
#            work_on_cpu.remove(work)
#            continue 
#        klit_pos = _find_work_carrier(work,on_cpu)
#        # if the klitirqd task is running and is not blocked
#        if klit_pos is not None and on_cpu[klit_pos].is_blocked is False: 
#            _on = _find_job(work,on_cpu)
#            # owner task is running
#            if _on is not None and on_cpu[_on].is_complete is not True and on_cpu[_on].is_blocked is not True:
#                if work.simultaneous_start is None:
#                    work.simultaneous_start = when
#                    errors.append(Error_simultaneous(work,int(owner_cpu/csize),
#                        first_event_this_timestamp,"Work_Item")) 
#                    continue		
#
#        if work.simultaneous_start is not None:
#            work.simultaneous_end = when
#            errors.append(Error_simultaneous(work, int(owner_cpu/csize), 
#                first_event_this_timestamp,"Work_Item"))
#            work.simultaneous_start = None
#            work.simultaneous_end = None		
#        
#                               
#    return errors


# Return records for any inversion_starts and inversion_ends
#def _cedf_check_tasklet_simultaneous(off_cpu,on_cpu,
#	tasklet_off_cpu, tasklet_on_cpu, 
#	when,csize,first_event_this_timestamp):
#
#    # List of error records to be returned
#    errors = []
#
#	#Look for all tasklet is tasklet_off_cpu (not running)
#    for tasklet in tasklet_off_cpu:
#        # find owner's cpu
#        owner_cpu = _get_cpu(tasklet,off_cpu,on_cpu,"Tasklet") 
#        if owner_cpu is None:
#            tasklet_off_cpu.remove(tasklet)
#            continue
#        
#        if tasklet.simultaneous_start is not None:
#            tasklet.simultaneous_end = when
#            errors.append(Error_simultaneous(tasklet, int(owner_cpu/csize),
#                first_event_this_timestamp,"Tasklet"))
#            tasklet.simultaneous_start = None
#            tasklet.simultaneous_end = None              
#        
#                     
#    #Look for all tasklet is tasklet_on_cpu (running)
#    for tasklet in tasklet_on_cpu:
#        # find owner's cpu
#        owner_cpu = _get_cpu(tasklet,off_cpu,on_cpu,"Tasklet") 
#        if owner_cpu is None:
#            tasklet_on_cpu.remove(tasklet)
#            continue
#        _on = _find_job(tasklet,on_cpu)
#        # owner task is running
#        if _on is not None and on_cpu[_on].is_complete is not True and on_cpu[_on].is_blocked is not True:
#            if tasklet.simultaneous_start is None:
#                tasklet.simultaneous_start = when
#                errors.append(Error_simultaneous(tasklet,int(owner_cpu/csize),
#                    first_event_this_timestamp,"Tasklet")) 
#        else:
#            if tasklet.simultaneous_start is not None:
#                tasklet.simultaneous_end = when
#                errors.append(Error_simultaneous(tasklet, int(owner_cpu/csize), 
#                    first_event_this_timestamp,"Tasklet"))
#                tasklet.simultaneous_start = None
#                tasklet.simultaneous_end = None 
#		
#    return errors