###############################################################################
# Description
###############################################################################
# C-EDF Test

###############################################################################
# Imports
###############################################################################

import time
import copy
import sys
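# For reference, a minimal sketch of the event-record interface this test
# assumes (field names inferred from their usage in cedf_test below; the real
# records come from the surrounding trace framework):
#
#   record.record_type  # "event" (all other record types are skipped)
#   record.type_name    # 'release', 'switch_to', 'completion', 'switch_away',
#                       # 'block', 'resume', 'tasklet_release', 'work_release',
#                       # 'tasklet_begin', 'work_begin', 'tasklet_end',
#                       # 'work_end', 'eff_prio_change'
#   record.id           # monotonically increasing event id
#   record.when         # timestamp of the event
#   record.pid, record.job, record.cpu, record.deadline
#   record.inh_pid      # inheritance source ('eff_prio_change' records only)
#   record.exe_pid      # klitirqd thread pid ('tasklet_begin'/'work_begin' only)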
###############################################################################
# Public Functions
###############################################################################

def cedf_test(stream, csize, number):

    m = 12 # number of CPUs; hard-coded to work around a bug in the num_cpus
           # meta record (see the commented-out lookup below)

    # System model
    on_cpu = []          # Tasks on CPU
    off_cpu = []         # Tasks not on CPU
    tasklet_off_cpu = [] # Tasklets not on CPU
    tasklet_on_cpu = []  # Tasklets on CPU
    work_off_cpu = []    # Work items not on CPU
    work_on_cpu = []     # Work items on CPU
    cluster_map = []

    #for record in stream:
    #    if record.record_type == "meta" and record.type_name == "num_cpus":
    #        m = record.num_cpus
    #        break

    job_2s_released = [] # list of tasks which have released their job 2s

    # Time of the last record we saw. Only run the C-EDF test when the time
    # is updated.
    last_time = None

    # Progress accounting
    start_time = time.time()
    count = 0

    # First event for the latest timestamp. This is used to match up
    # inversion starts and ends with the first event from the previous
    # timestamp, which is the first event that could have triggered
    # the inversion start or end.
    first_event_this_timestamp = 0

    for record in stream:
        if record.record_type != "event":
            #yield record # unless needed for analysis
            continue

        # All records with job < 2 are garbage
        if record.job < 2:
            continue

        # All records with pid < 1 are garbage
        if record.pid < 1:
            continue

        # All records with cpu outside 0..11 are garbage (m == 12)
        if record.cpu > 11 or record.cpu < 0:
            continue

        # All records with when == 0 are garbage
        if record.when == 0:
            continue

        #yield record
        #continue

        # Bookkeeping iff the timestamp has moved forward.
        # Check for inversion starts and ends and yield them.
        # (It is common to have records with simultaneous timestamps,
        # so we only check when the time has moved forward.)
        # Also, need to update the first_event_this_timestamp variable.
        if last_time is not None and last_time != record.when:

            # check PI for tasklets
            errors = _cedf_check_irq_inversion(
                off_cpu, on_cpu,
                tasklet_on_cpu, work_on_cpu,
                tasklet_off_cpu, tasklet_on_cpu,
                last_time, csize,
                first_event_this_timestamp,
                "Tasklet")
            for error in errors:
                yield error

            # check simultaneous execution for tasklets
            errors = _cedf_check_irq_simultaneous(
                off_cpu, on_cpu,
                tasklet_on_cpu, work_on_cpu,
                tasklet_off_cpu, tasklet_on_cpu,
                last_time, csize,
                first_event_this_timestamp,
                "Tasklet")
            for error in errors:
                yield error

            # check PI for work items
            errors = _cedf_check_irq_inversion(
                off_cpu, on_cpu,
                tasklet_on_cpu, work_on_cpu,
                work_off_cpu, work_on_cpu,
                last_time, csize,
                first_event_this_timestamp,
                "Work_Item")
            for error in errors:
                yield error

            # check simultaneous execution for work items
            errors = _cedf_check_irq_simultaneous(
                off_cpu, on_cpu,
                tasklet_on_cpu, work_on_cpu,
                work_off_cpu, work_on_cpu,
                last_time, csize,
                first_event_this_timestamp,
                "Work_Item")
            for error in errors:
                yield error

            # check PI for tasks, cluster by cluster
            for c in range(0, int(m/csize)):
                errors = _cedf_check(c,
                    off_cpu, on_cpu,
                    tasklet_on_cpu, work_on_cpu,
                    last_time, csize,
                    first_event_this_timestamp)
                for error in errors:
                    yield error

            first_event_this_timestamp = record.id
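        # Worked example of the batching above (an illustration, not from a
        # real trace): records at when=100, when=100, when=110 arrive in
        # order. The two when=100 records only update the model; the checks
        # run once, when the when=110 record is seen, with last_time=100 and
        # first_event_this_timestamp pointing at the first when=100 record.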
        # Add a newly-released Job to the off_cpu queue
        if record.type_name == 'release':
            # Some records with job == 2 are garbage
            if record.job == 2:
                # There is a duplicate release of every job 2.
                # This will throw away the second one.
                if record.pid in job_2s_released:
                    continue
                else:
                    job_2s_released.append(record.pid)
            off_cpu.append(Job(record))
            # These fields are not set in Job.__init__
            pos = _find_job(record, off_cpu)
            off_cpu[pos].cluster = _get_cluster(record, cluster_map)
            off_cpu[pos].deadline = record.deadline
            #off_cpu[pos].inh_deadline = record.deadline

        # Move a Job from the off_cpu queue to on_cpu
        elif record.type_name == 'switch_to':
            pos = _find_job(record, off_cpu)
            if pos is None:
                msg = "Event %d tried to switch to a job %d that was not on the"
                msg += " off_cpu queue\n"
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                yield record
                #exit()
                continue
            job = off_cpu[pos]
            job.cluster = int(record.cpu/csize)
            _insert_cluster_map(job, cluster_map)
            del off_cpu[pos]
            on_cpu.append(job)

        # Mark a Job as completed.
        # The only time a Job completes when it is not on a
        # CPU is when it is the last job of the task.
        elif record.type_name == 'completion':
            pos = _find_job(record, on_cpu)
            if pos is not None:
                on_cpu[pos].is_complete = True
            else:
                pos = _find_job(record, off_cpu)
                if pos is None:
                    msg = ("Event %d tried to complete a job %d" +
                        " that was not running\n")
                    msg = msg % (record.id, record.pid)
                    print msg
                    #sys.stderr.write(msg)
                    yield record
                    #exit()
                    continue
                if off_cpu[pos].inversion_start is not None:
                    off_cpu[pos].inversion_end = record.when
                    yield Error(off_cpu[pos], off_cpu, on_cpu,
                        first_event_this_timestamp, "Task")
                    off_cpu[pos].inversion_start = None
                    off_cpu[pos].inversion_end = None
                del off_cpu[pos]

        # A job is switched away from a CPU. If it has
        # been marked as complete, remove it from the model.
        elif record.type_name == 'switch_away':
            pos = _find_job(record, on_cpu)
            if pos is None:
                # By default, the switch_away for a job (after it has
                # completed) is marked as being for job+1, which has never
                # been switched to. We can correct this if we note which
                # jobs really have been switched to.
                record.job -= 1
                pos = _find_job(record, on_cpu)
                if pos is None:
                    msg = ("Event %d tried to switch away a job %d" +
                        " that was not running\n")
                    msg = msg % (record.id, record.pid)
                    print msg
                    #sys.stderr.write(msg)
                    #exit()
                    yield record
                    continue
            job = on_cpu[pos]
            del on_cpu[pos]
            if not job.is_complete:
                off_cpu.append(job)

        # A job has been blocked.
        elif record.type_name == 'block':
            pos = _find_job(record, on_cpu)
            # What if the job is blocked AFTER being switched away?
            # This is a bug in some versions of LITMUS.
            if pos is None:
                pos = _find_job(record, off_cpu)
                if pos is None:
                    msg = ("Event %d tried to block a job %d" +
                        " that was not running\n")
                    msg = msg % (record.id, record.pid)
                    print msg
                    #sys.stderr.write(msg)
                    yield record
                    #exit()
                    continue
                off_cpu[pos].is_blocked = True
            else:
                on_cpu[pos].is_blocked = True

        # A job is resumed
        elif record.type_name == 'resume':
            # Job 2 has a resume that is garbage
            if record.job == 2:
                continue
            pos = _find_job(record, off_cpu)
            if pos is None:
                msg = ("Event %d tried to resume a job %d" +
                    " that was not running\n")
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                yield record
                #exit()
                continue
            off_cpu[pos].is_blocked = False

        # Add a newly-released Tasklet to the tasklet_off_cpu queue
        elif record.type_name == 'tasklet_release':
            # Only track qualified klitirqd tasklets (owner is a known task)
            if _is_owner(record, off_cpu, on_cpu):
                pos = _find_job(record, off_cpu)
                if pos is not None:
                    job = off_cpu[pos]
                else:
                    # weird: owner is currently on a CPU
                    pos = _find_job(record, on_cpu)
                    job = on_cpu[pos]
                tasklet_off_cpu.append(Job(record))
                # These fields are not set in Job.__init__
                pos = _find_job(record, tasklet_off_cpu)
                tasklet_off_cpu[pos].cluster = job.cluster
                tasklet_off_cpu[pos].deadline = job.deadline
                tasklet_off_cpu[pos].inversion_start = None
                tasklet_off_cpu[pos].inversion_end = None
                tasklet_off_cpu[pos].inversion_start_id = None
                tasklet_off_cpu[pos].inversion_start_triggering_event_id = None

        # Add a newly-released Work item to the work_off_cpu queue
        elif record.type_name == 'work_release':
            # Only track qualified klitirqd work items (owner is a known task)
            if _is_owner(record, off_cpu, on_cpu):
                pos = _find_job(record, off_cpu)
                if pos is not None:
                    job = off_cpu[pos]
                else:
                    # weird: owner is currently on a CPU
                    pos = _find_job(record, on_cpu)
                    job = on_cpu[pos]
                work_off_cpu.append(Job(record))
                # These fields are not set in Job.__init__
                pos = _find_job(record, work_off_cpu)
                work_off_cpu[pos].cluster = job.cluster
                work_off_cpu[pos].deadline = job.deadline
                work_off_cpu[pos].inversion_start = None
                work_off_cpu[pos].inversion_end = None
                work_off_cpu[pos].inversion_start_id = None
                work_off_cpu[pos].inversion_start_triggering_event_id = None

        # Move a Tasklet from the tasklet_off_cpu queue to tasklet_on_cpu
        elif record.type_name == 'tasklet_begin':
            pos = _find_job(record, tasklet_off_cpu)
            if pos is None:
                msg = "Event %d tried to begin a tasklet for job %d that was not on the"
                msg += " tasklet_off_cpu queue\n"
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                yield record
                #exit()
                # Continue, to see if anything else is wrong
                continue
            job = tasklet_off_cpu[pos]
            job.exe_pid = record.exe_pid
            del tasklet_off_cpu[pos]
            tasklet_on_cpu.append(job)

        # Register a klitirqd thread to a Work item in the work_off_cpu queue
        elif record.type_name == 'work_begin':
            pos = _find_job(record, work_off_cpu)
            if pos is None:
                msg = "Event %d tried to begin a work item for job %d that was not on the"
                msg += " work_off_cpu queue\n"
                msg = msg % (record.id, record.pid)
                print msg
                #sys.stderr.write(msg)
                yield record
                #exit()
                # Continue, to see if anything else is wrong
                continue
            job = work_off_cpu[pos]
            job.exe_pid = record.exe_pid
            del work_off_cpu[pos]
            work_on_cpu.append(job)
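        # Note on the klitirqd model assumed by the handlers above and below:
        # a tasklet/work-item record carries the *owner* task's pid/job, so
        # _find_job matches it against the owner's identity, while exe_pid
        # (set at tasklet_begin/work_begin) names the klitirqd thread that
        # actually executes it; _find_irq_carrier matches on exe_pid.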
        # A Tasklet ends on a CPU.
        elif record.type_name == 'tasklet_end':
            pos = _find_job(record, tasklet_on_cpu)
            if pos is None:
                msg = ("Event %d tried to end a tasklet for job %d" +
                    " that was not running\n")
                msg = msg % (record.id, record.pid)
                print msg
                yield record
                #sys.stderr.write(msg)
                pos = _find_job(record, tasklet_off_cpu)
                if pos is not None:
                    del tasklet_off_cpu[pos]
                #exit()
                # Continue, to see if anything else is wrong
            else:
                if tasklet_on_cpu[pos].inversion_start is not None:
                    tasklet_on_cpu[pos].inversion_end = record.when
                    yield Error(tasklet_on_cpu[pos], off_cpu, on_cpu,
                        first_event_this_timestamp, "Tasklet")
                    tasklet_on_cpu[pos].inversion_start = None
                    tasklet_on_cpu[pos].inversion_end = None
                del tasklet_on_cpu[pos]

        # A Work item ends on a CPU.
        elif record.type_name == 'work_end':
            pos = _find_job(record, work_on_cpu)
            if pos is None:
                msg = ("Event %d tried to end a work item for job %d" +
                    " that was not running\n")
                msg = msg % (record.id, record.pid)
                print msg
                yield record
                #sys.stderr.write(msg)
                pos = _find_job(record, work_off_cpu)
                if pos is not None:
                    del work_off_cpu[pos]
                #exit()
                # Continue, to see if anything else is wrong
            else:
                if work_on_cpu[pos].inversion_start is not None:
                    work_on_cpu[pos].inversion_end = record.when
                    yield Error(work_on_cpu[pos], off_cpu, on_cpu,
                        first_event_this_timestamp, "Work_Item")
                    work_on_cpu[pos].inversion_start = None
                    work_on_cpu[pos].inversion_end = None
                del work_on_cpu[pos]

        # A Task's effective priority changes (priority inheritance)
        elif record.type_name == 'eff_prio_change':
            inh_pos = _find_inh_job(record, off_cpu)
            if inh_pos is None:
                inh_pos = _find_inh_job(record, on_cpu)
                if inh_pos is not None:
                    inh_job = on_cpu[inh_pos]
            else:
                inh_job = off_cpu[inh_pos]

            pos = _find_job(record, on_cpu)
            if pos is None:
                pos = _find_job(record, off_cpu)
                if pos is None:
                    msg = ("Event %d tried to change the priority of a" +
                        " job %d that cannot be found\n")
                    msg = msg % (record.id, record.pid)
                    print msg
                    #sys.stderr.write(msg)
                    yield record
                    #exit()
                    # Continue, to see if anything else is wrong
                    continue
                if inh_pos is not None:
                    off_cpu[pos].base_deadline = off_cpu[pos].deadline
                    off_cpu[pos].deadline = inh_job.deadline
                else:
                    off_cpu[pos].deadline = off_cpu[pos].base_deadline
            else:
                if inh_pos is not None:
                    on_cpu[pos].base_deadline = on_cpu[pos].deadline
                    on_cpu[pos].deadline = inh_job.deadline
                else:
                    on_cpu[pos].deadline = on_cpu[pos].base_deadline

        else:
            continue # illegal event

        last_time = record.when

        # Progress reporting on stderr, roughly every 25000 events
        if (record.id - count) > 25000:
            sys.stderr.write("Parsed through event %d:\t" % (record.id))
            #sys.stderr.write("(%d,%d,%d,%d,%d,%d)\n"
            #    % (len(on_cpu), len(off_cpu),
            #       len(tasklet_on_cpu), len(tasklet_off_cpu),
            #       len(work_on_cpu), len(work_off_cpu)))
            sys.stderr.write("Time elapsed in last 25000: %ds\n\n"
                % (time.time() - start_time))
            start_time = time.time()
            count = record.id

        # Stop once the record id exceeds the requested maximum (number)
        if record.id > number:
            break

        yield record
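# cedf_test is a generator: it re-yields the input event records, interleaved
# with Error / Error_simultaneous records (record_type == 'error') at the
# points where inversions or simultaneous executions begin and end. A
# consumer might separate them like this (a sketch; handle_error is a
# hypothetical callback):
#
#   for rec in cedf_test(stream, csize, number):
#       if rec.record_type == 'error':
#           handle_error(rec)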
###############################################################################
# Private Functions
###############################################################################

# Internal representation of a Job
class Job(object):
    def __init__(self, record):
        self.pid = record.pid
        self.job = record.job
        self.cpu = record.cpu # target cpu (-1: not assigned yet)
        self.cluster = -1     # target cluster (-1: not assigned yet)
        self.deadline = None
        self.base_deadline = None
        self.exe_pid = -1     # handling klitirqd thread (-1: not assigned yet)
        self.is_complete = False
        self.is_blocked = False
        self.inversion_start = None
        self.inversion_end = None
        self.inversion_start_id = None
        self.inversion_start_triggering_event_id = None
        self.simultaneous_start = None
        self.simultaneous_end = None
        self.simultaneous_start_id = None
        self.simultaneous_start_triggering_event_id = None

    def __str__(self):
        return "(%d.%d:%d)" % (self.pid, self.job, self.deadline)

# C-EDF errors: the start or end of an inversion
class Error(object):
    id = 0
    def __init__(self, job, off_cpu, on_cpu,
            first_event_this_timestamp, pi_type):
        Error.id += 1
        self.error_type = "inversion"
        self.id = Error.id
        self.job = copy.copy(job)
        self.off_cpu = copy.copy(off_cpu)
        self.on_cpu = copy.copy(on_cpu)
        self.record_type = 'error'
        self.triggering_event_id = first_event_this_timestamp
        if job.inversion_end is None:
            self.type_name = pi_type + '_inversion_start'
            job.inversion_start_id = self.id
            job.inversion_start_triggering_event_id = self.triggering_event_id
        else:
            self.type_name = pi_type + '_inversion_end'
            self.inversion_start_id = job.inversion_start_id
            self.inversion_start_triggering_event_id = \
                job.inversion_start_triggering_event_id

# C-EDF errors: the start or end of a simultaneous execution of an owner
# and its tasklet (or work item)
class Error_simultaneous(object):
    id = 0
    def __init__(self, job, off_cpu, on_cpu,
            first_event_this_timestamp, irq_type):
        Error_simultaneous.id += 1
        self.error_type = "simultaneous"
        self.id = Error_simultaneous.id
        self.job = copy.copy(job)
        self.off_cpu = copy.copy(off_cpu)
        self.on_cpu = copy.copy(on_cpu)
        self.record_type = 'error'
        self.triggering_event_id = first_event_this_timestamp
        if job.simultaneous_end is None:
            self.type_name = irq_type + '_simultaneous_start'
            job.simultaneous_start_id = self.id
            job.simultaneous_start_triggering_event_id = self.triggering_event_id
        else:
            self.type_name = irq_type + '_simultaneous_end'
            self.simultaneous_start_id = job.simultaneous_start_id
            self.simultaneous_start_triggering_event_id = \
                job.simultaneous_start_triggering_event_id

# Insert a job into the cluster map
def _insert_cluster_map(job, cluster_map):
    for x in cluster_map:
        if x.pid == job.pid:
            if x.cluster != job.cluster:
                sys.stderr.write(
                    "Cluster for Job %d has been changed\n" % (job.pid))
                x.cluster = job.cluster
            return
    cluster_map.append(job)

# Get the cluster recorded for a record's task
def _get_cluster(record, cluster_map):
    for job in cluster_map:
        if job.pid == record.pid:
            return job.cluster
    return -1 # (-1: not assigned yet)

# Returns the position of a Job in a list, or None
def _find_job(record, job_list):
    for i in range(0, len(job_list)):
        if job_list[i].pid == record.pid and job_list[i].job == record.job:
            return i
    return None

# Identify a klitirqd thread (a job whose pid is some irq's exe_pid)
def _is_klit_thread(job, tasklet_on_cpu, work_on_cpu):
    for x in tasklet_on_cpu:
        if x.exe_pid is not None and x.exe_pid == job.pid:
            return True
    for x in work_on_cpu:
        if x.exe_pid is not None and x.exe_pid == job.pid:
            return True
    return False

# Returns the position of the executing task (klitirqd thread) for a
# tasklet or work item in a list, or None
def _find_irq_carrier(job, job_list):
    for i in range(0, len(job_list)):
        if job_list[i].pid == job.exe_pid:
            return i
    return None

# Returns the position of the inherited-from Job in a list, or None
def _find_inh_job(record, job_list):
    for i in range(0, len(job_list)):
        if job_list[i].pid == record.inh_pid:
            return i
    return None

# Return True if the owner task is in off_cpu or on_cpu
def _is_owner(record, off_cpu, on_cpu):
    pos = _find_job(record, off_cpu)
    if pos is not None:
        return True
    pos = _find_job(record, on_cpu)
    return pos is not None
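# The checkers below order contenders with two passes of Python's stable
# sort: first by "is it running", then by deadline. A tiny illustration with
# made-up jobs (deadlines 5 and 9, j5_on currently running):
#
#   all = [j9_off, j5_off, j5_on]
#   all.sort(key=lambda x: 0 if (x in on_cpu) else 1) # -> j5_on, j9_off, j5_off
#   all.sort(key=lambda x: x.deadline)                # -> j5_on, j5_off, j9_off
#
# Because sort() is stable, deadline ties keep the running-first order from
# the first pass.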
# Return records for any inversion_starts and inversion_ends
def _cedf_check(c,
        off_cpu, on_cpu,
        tasklet_on_cpu, work_on_cpu,
        when, csize,
        first_event_this_timestamp):

    # List of error records to be returned
    errors = []

    # List of all jobs without a klitirqd thread that are contending for the
    # CPU (neither complete nor blocked)
    all = []
    for x in on_cpu:
        if x.cluster == c and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
            if not x.is_complete and not x.is_blocked:
                all.append(x)
    for x in off_cpu:
        if x.cluster == c and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
            if not x.is_blocked:
                all.append(x)

    # Sort by on_cpu and then by deadline. sort() is guaranteed to be stable.
    # Thus, this gives us jobs ordered by deadline with preference to those
    # actually running.
    all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
    all.sort(key=lambda x: x.deadline)

    # Check those that actually should be running, to look for priority
    # inversions
    for x in range(0, min(csize, len(all))):
        job = all[x]

        # It's not running and an inversion_start has not been recorded
        if job not in on_cpu and job.inversion_start is None:
            job.inversion_start = when
            errors.append(Error(job, off_cpu, on_cpu,
                first_event_this_timestamp, "Task"))

        # It is running and an inversion_start exists (i.e. it is still
        # marked as being inverted)
        elif job in on_cpu and job.inversion_start is not None:
            job.inversion_end = when
            errors.append(Error(job, off_cpu, on_cpu,
                first_event_this_timestamp, "Task"))
            job.inversion_start = None
            job.inversion_end = None

    # Check those that actually should not be running, to record the end
    # of any priority inversions
    for x in range(csize, len(all)):
        job = all[x]
        if job not in on_cpu and job.inversion_start is not None:
            job.inversion_end = when
            errors.append(Error(job, off_cpu, on_cpu,
                first_event_this_timestamp, "Task"))
            job.inversion_start = None
            job.inversion_end = None

    # Also close inversions for tasks that are blocked or complete, whether
    # in on_cpu or off_cpu
    for x in on_cpu:
        if x.cluster == c and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
            if x.is_blocked or x.is_complete:
                if x.inversion_start is not None:
                    x.inversion_end = when
                    errors.append(Error(x, off_cpu, on_cpu,
                        first_event_this_timestamp, "Task"))
                    x.inversion_start = None
                    x.inversion_end = None
    for x in off_cpu:
        if x.cluster == c and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
            if x.is_blocked:
                if x.inversion_start is not None:
                    x.inversion_end = when
                    errors.append(Error(x, off_cpu, on_cpu,
                        first_event_this_timestamp, "Task"))
                    x.inversion_start = None
                    x.inversion_end = None

    return errors
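# Decision summary for _cedf_check_irq_inversion (inferred from the checks
# below). A tasklet/work item inherits its owner's cluster and deadline.
#  - No carrier assigned yet (irq_off_cpu): an inversion is open while the
#    owner is among the csize highest-priority jobs of its cluster but is
#    not running (the pending irq work is being delayed).
#  - Carrier assigned (irq_on_cpu): for a high-priority owner, the inversion
#    ends once the klitirqd carrier is running and unblocked; for a
#    low-priority owner, an inversion is opened while the carrier runs
#    (irq work executes ahead of higher-priority tasks).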
# Return records for any inversion_start and inversion_end
def _cedf_check_irq_inversion(
        off_cpu, on_cpu,
        tasklet_on_cpu, work_on_cpu,
        irq_off_cpu, irq_on_cpu,
        when, csize,
        first_event_this_timestamp,
        irq_type):

    # List of error records to be returned
    errors = []

    # Look at each irq in irq_off_cpu (no klitirqd thread assigned yet)
    for irq in irq_off_cpu:

        # List of all jobs without a klitirqd thread that are contending for
        # the CPU (completed jobs are excluded; blocked tasks are counted)
        all = []
        for x in on_cpu:
            if x.cluster == irq.cluster and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
                if not x.is_complete: # and x.is_blocked is not True:
                    all.append(x)
        for x in off_cpu:
            if x.cluster == irq.cluster and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
                #if x.is_blocked is not True:
                all.append(x)

        # Sort by on_cpu and then by deadline. sort() is guaranteed to be
        # stable. Thus, this gives us jobs ordered by deadline with
        # preference to those actually running.
        all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
        all.sort(key=lambda x: x.deadline)

        pos = _find_job(irq, all)
        _on = _find_job(irq, on_cpu)

        # owner task has completed (weird!!!)
        if pos is None and irq.inversion_start is not None:
            irq.inversion_end = when
            errors.append(Error(irq, off_cpu, on_cpu,
                first_event_this_timestamp, irq_type))
            irq.inversion_start = None
            irq.inversion_end = None
        elif pos is not None:
            # Look at the priority of the owner task.
            # The owner is among the csize highest-priority tasks in the
            # cluster (pos always satisfies pos < len(all)):
            if pos < min(csize, len(all)):
                # owner task is not running
                if _on is None:
                    if irq.inversion_start is None:
                        irq.inversion_start = when
                        errors.append(Error(irq, off_cpu, on_cpu,
                            first_event_this_timestamp, irq_type))
                else:
                    # a very weird situation, because the owner must be
                    # blocked
                    if irq.inversion_start is not None:
                        irq.inversion_end = when
                        errors.append(Error(irq, off_cpu, on_cpu,
                            first_event_this_timestamp, irq_type))
                        irq.inversion_start = None
                        irq.inversion_end = None
            # The owner is not among the csize highest-priority tasks in
            # the cluster:
            else:
                if irq.inversion_start is not None:
                    irq.inversion_end = when
                    errors.append(Error(irq, off_cpu, on_cpu,
                        first_event_this_timestamp, irq_type))
                    irq.inversion_start = None
                    irq.inversion_end = None
    # irqs handled by klitirqd (tasklet_begin/work_begin already seen and a
    # klitirqd thread assigned)
    for irq in irq_on_cpu:

        # List of all jobs without a klitirqd thread that are contending for
        # the CPU (completed jobs are excluded; blocked tasks are counted)
        all = []
        for x in on_cpu:
            if x.cluster == irq.cluster and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
                if not x.is_complete: # and x.is_blocked is not True:
                    all.append(x)
        for x in off_cpu:
            if x.cluster == irq.cluster and not _is_klit_thread(x, tasklet_on_cpu, work_on_cpu):
                #if x.is_blocked is not True:
                all.append(x)

        # Sort by on_cpu and then by deadline. sort() is guaranteed to be
        # stable. Thus, this gives us jobs ordered by deadline with
        # preference to those actually running.
        all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
        all.sort(key=lambda x: x.deadline)

        pos = _find_job(irq, all)
        _on = _find_job(irq, on_cpu)
        klit_pos = _find_irq_carrier(irq, on_cpu)

        # owner task has completed (weird!!!)
        if pos is None:
            if irq.inversion_start is not None:
                irq.inversion_end = when
                errors.append(Error(irq, off_cpu, on_cpu,
                    first_event_this_timestamp, irq_type))
                irq.inversion_start = None
                irq.inversion_end = None
        else:
            # Look at the priority of the owner task.
            # The owner is among the csize highest-priority tasks in the
            # cluster:
            if pos < min(csize, len(all)):
                # owner task is not running
                if _on is None:
                    # the klitirqd thread is running and is not blocked
                    if klit_pos is not None and not on_cpu[klit_pos].is_blocked:
                        if irq.inversion_start is not None:
                            irq.inversion_end = when
                            errors.append(Error(irq, off_cpu, on_cpu,
                                first_event_this_timestamp, irq_type))
                            irq.inversion_start = None
                            irq.inversion_end = None
                    # the klitirqd thread is not running or is blocked
                    # (or the no-klitirqd case)
                    else:
                        if irq.inversion_start is None:
                            irq.inversion_start = when
                            errors.append(Error(irq, off_cpu, on_cpu,
                                first_event_this_timestamp, irq_type))
                else:
                    # a very weird situation, because the owner must be
                    # blocked
                    if irq.inversion_start is not None:
                        irq.inversion_end = when
                        errors.append(Error(irq, off_cpu, on_cpu,
                            first_event_this_timestamp, irq_type))
                        irq.inversion_start = None
                        irq.inversion_end = None
            # The owner is not among the csize highest-priority tasks in
            # the cluster:
            else:
                if _on is None:
                    # the klitirqd thread is running and is not blocked
                    if klit_pos is not None and not on_cpu[klit_pos].is_blocked:
                        if irq.inversion_start is None:
                            irq.inversion_start = when
                            errors.append(Error(irq, off_cpu, on_cpu,
                                first_event_this_timestamp, irq_type))
                    # the klitirqd thread is not running or is blocked
                    # (or the no-klitirqd case)
                    else:
                        if irq.inversion_start is not None:
                            irq.inversion_end = when
                            errors.append(Error(irq, off_cpu, on_cpu,
                                first_event_this_timestamp, irq_type))
                            irq.inversion_start = None
                            irq.inversion_end = None
                else:
                    # a very weird situation, because the owner must be
                    # blocked; this is counted as a task inversion, not a
                    # tasklet inversion
                    if irq.inversion_start is not None:
                        irq.inversion_end = when
                        errors.append(Error(irq, off_cpu, on_cpu,
                            first_event_this_timestamp, irq_type))
                        irq.inversion_start = None
                        irq.inversion_end = None

    return errors

# Return records for any simultaneous_start and simultaneous_end
def _cedf_check_irq_simultaneous(
        off_cpu, on_cpu,
        tasklet_on_cpu, work_on_cpu,
        irq_off_cpu, irq_on_cpu,
        when, csize,
        first_event_this_timestamp,
        irq_type):

    # List of error records to be returned
    errors = []

    # Look at each irq in irq_off_cpu (no klitirqd thread assigned yet):
    # without a carrier nothing can execute simultaneously, so close any
    # open simultaneous interval.
    for irq in irq_off_cpu:
        if irq.simultaneous_start is not None:
            irq.simultaneous_end = when
            errors.append(Error_simultaneous(irq, off_cpu, on_cpu,
                first_event_this_timestamp, irq_type))
            irq.simultaneous_start = None
            irq.simultaneous_end = None

    # irqs handled by klitirqd (tasklet_begin/work_begin already seen and a
    # klitirqd thread assigned)
    for irq in irq_on_cpu:
        _on = _find_job(irq, on_cpu)
        klit_pos = _find_irq_carrier(irq, on_cpu)
        if _on is None:
            if irq.simultaneous_start is not None:
                irq.simultaneous_end = when
                errors.append(Error_simultaneous(irq, off_cpu, on_cpu,
                    first_event_this_timestamp, irq_type))
                irq.simultaneous_start = None
                irq.simultaneous_end = None
        else:
            # the owner is running; if its klitirqd thread is also running
            # and not blocked, the owner and its irq work are executing
            # simultaneously
            if klit_pos is not None and not on_cpu[klit_pos].is_blocked:
                if irq.simultaneous_start is None:
                    irq.simultaneous_start = when
                    errors.append(Error_simultaneous(irq, off_cpu, on_cpu,
                        first_event_this_timestamp, irq_type))

    return errors
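###############################################################################
# Example
###############################################################################
# A minimal, self-contained smoke test (a sketch: the mock records and their
# field values below are illustrative assumptions; real traces come from the
# surrounding trace-reader, not from hand-built records).
if __name__ == '__main__':

    class _MockRecord(object):
        def __init__(self, id, type_name, pid, job, cpu, when, deadline=None):
            self.record_type = 'event'
            self.id = id
            self.type_name = type_name
            self.pid = pid
            self.job = job
            self.cpu = cpu
            self.when = when
            self.deadline = deadline

    demo_stream = [
        _MockRecord(1, 'release',     101, 2, 0, 10, deadline=110),
        _MockRecord(2, 'switch_to',   101, 2, 0, 20),
        _MockRecord(3, 'completion',  101, 2, 0, 30),
        # LITMUS logs the final switch_away under job+1; cedf_test corrects it
        _MockRecord(4, 'switch_away', 101, 3, 0, 40),
    ]

    # csize=4 (clusters of 4 CPUs); stop after event id 100
    for out in cedf_test(iter(demo_stream), 4, 100):
        print "%s: %s" % (out.record_type, out.type_name)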