diff options
| -rwxr-xr-x | unit-trace | 6 | ||||
| -rw-r--r-- | unit_trace/cedf_test.py | 386 | ||||
| -rw-r--r-- | unit_trace/stdout_printer.py | 83 |
3 files changed, 244 insertions, 231 deletions
| @@ -111,7 +111,7 @@ if options.progress is True: | |||
| 111 | # stream = gedf_test.gedf_test(stream) | 111 | # stream = gedf_test.gedf_test(stream) |
| 112 | 112 | ||
| 113 | # Produce C-EDF error records | 113 | # Produce C-EDF error records |
| 114 | if options.cedf is True: | 114 | if options.cedf is True and options.maxnum > 0: |
| 115 | from unit_trace import cedf_test | 115 | from unit_trace import cedf_test |
| 116 | stream = cedf_test.cedf_test(stream,options.csize,options.maxnum) | 116 | stream = cedf_test.cedf_test(stream,options.csize,options.maxnum) |
| 117 | 117 | ||
| @@ -165,10 +165,10 @@ if options.dist is True: | |||
| 165 | #cedf_stat_printer.cedf_stat_printer(stream9,options.cdf_unit,False,1,"Work_Item_inversion",options.out_file) | 165 | #cedf_stat_printer.cedf_stat_printer(stream9,options.cdf_unit,False,1,"Work_Item_inversion",options.out_file) |
| 166 | 166 | ||
| 167 | 167 | ||
| 168 | #cedf_stat_printer.cedf_stat_printer(stream5,options.cdf_unit,False,0,"Tasklet_simultaneous",options.out_file) | 168 | cedf_stat_printer.cedf_stat_printer(stream5,options.cdf_unit,False,0,"Tasklet_simultaneous",options.out_file) |
| 169 | #cedf_stat_printer.cedf_stat_printer(stream12,options.cdf_unit,False,1,"Tasklet_simultaneous",options.out_file) | 169 | #cedf_stat_printer.cedf_stat_printer(stream12,options.cdf_unit,False,1,"Tasklet_simultaneous",options.out_file) |
| 170 | 170 | ||
| 171 | #cedf_stat_printer.cedf_stat_printer(stream6,options.cdf_unit,False,0,"Work_Item_simultaneous",options.out_file) | 171 | cedf_stat_printer.cedf_stat_printer(stream6,options.cdf_unit,False,0,"Work_Item_simultaneous",options.out_file) |
| 172 | #cedf_stat_printer.cedf_stat_printer(stream15,options.cdf_unit,False,1,"Work_Item_simultaneous",options.out_file) | 172 | #cedf_stat_printer.cedf_stat_printer(stream15,options.cdf_unit,False,1,"Work_Item_simultaneous",options.out_file) |
| 173 | 173 | ||
| 174 | 174 | ||
diff --git a/unit_trace/cedf_test.py b/unit_trace/cedf_test.py index d09d001..7d9cc77 100644 --- a/unit_trace/cedf_test.py +++ b/unit_trace/cedf_test.py | |||
| @@ -66,12 +66,7 @@ def cedf_test(stream,csize,number): | |||
| 66 | # All records with when ==0 are garbage | 66 | # All records with when ==0 are garbage |
| 67 | if record.when == 0: | 67 | if record.when == 0: |
| 68 | continue | 68 | continue |
| 69 | #if processing count reach number(maximum) start to process | 69 | |
| 70 | if number > 0: | ||
| 71 | number -= 1 | ||
| 72 | elif number == 0: | ||
| 73 | break | ||
| 74 | |||
| 75 | #yield record | 70 | #yield record |
| 76 | #continue | 71 | #continue |
| 77 | 72 | ||
| @@ -83,7 +78,7 @@ def cedf_test(stream,csize,number): | |||
| 83 | if last_time is not None and last_time != record.when: | 78 | if last_time is not None and last_time != record.when: |
| 84 | 79 | ||
| 85 | # check PI for tasklet | 80 | # check PI for tasklet |
| 86 | errors = _cedf_check_irq( | 81 | errors = _cedf_check_irq_inversion( |
| 87 | off_cpu,on_cpu, | 82 | off_cpu,on_cpu, |
| 88 | tasklet_on_cpu,work_on_cpu, | 83 | tasklet_on_cpu,work_on_cpu, |
| 89 | tasklet_off_cpu,tasklet_on_cpu, | 84 | tasklet_off_cpu,tasklet_on_cpu, |
| @@ -91,15 +86,19 @@ def cedf_test(stream,csize,number): | |||
| 91 | first_event_this_timestamp, "Tasklet") | 86 | first_event_this_timestamp, "Tasklet") |
| 92 | for error in errors: | 87 | for error in errors: |
| 93 | yield error | 88 | yield error |
| 94 | # check Simultaneous execution of tasklet and owner | 89 | |
| 95 | #errors = _cedf_check_tasklet_simultaneous(off_cpu,on_cpu, | 90 | # check Simultaneous for tasklet |
| 96 | # tasklet_off_cpu,tasklet_on_cpu,last_time,csize, | 91 | errors = _cedf_check_irq_simultaneous( |
| 97 | # first_event_this_timestamp) | 92 | off_cpu,on_cpu, |
| 98 | #for error in errors: | 93 | tasklet_on_cpu,work_on_cpu, |
| 99 | # yield error | 94 | tasklet_off_cpu,tasklet_on_cpu, |
| 95 | last_time,csize, | ||
| 96 | first_event_this_timestamp, "Tasklet") | ||
| 97 | for error in errors: | ||
| 98 | yield error | ||
| 100 | 99 | ||
| 101 | # check PI for work item | 100 | # check PI for work item |
| 102 | errors = _cedf_check_irq( | 101 | errors = _cedf_check_irq_inversion( |
| 103 | off_cpu,on_cpu, | 102 | off_cpu,on_cpu, |
| 104 | tasklet_on_cpu,work_on_cpu, | 103 | tasklet_on_cpu,work_on_cpu, |
| 105 | work_off_cpu,work_on_cpu, | 104 | work_off_cpu,work_on_cpu, |
| @@ -108,12 +107,16 @@ def cedf_test(stream,csize,number): | |||
| 108 | for error in errors: | 107 | for error in errors: |
| 109 | yield error | 108 | yield error |
| 110 | 109 | ||
| 111 | # check Simultaneous execution of work item and owner | 110 | # check Simultaneous for work item |
| 112 | #errors = _cedf_check_work_simultaneous(off_cpu,on_cpu, | 111 | errors = _cedf_check_irq_simultaneous( |
| 113 | # work_off_cpu,work_on_cpu,last_time,csize, | 112 | off_cpu,on_cpu, |
| 114 | # first_event_this_timestamp) | 113 | tasklet_on_cpu,work_on_cpu, |
| 115 | #for error in errors: | 114 | work_off_cpu,work_on_cpu, |
| 116 | # yield error | 115 | last_time,csize, |
| 116 | first_event_this_timestamp, "Work_Item") | ||
| 117 | for error in errors: | ||
| 118 | yield error | ||
| 119 | |||
| 117 | 120 | ||
| 118 | # check PI for task | 121 | # check PI for task |
| 119 | for c in range(0,int(m/csize)): | 122 | for c in range(0,int(m/csize)): |
| @@ -178,6 +181,7 @@ def cedf_test(stream,csize,number): | |||
| 178 | msg = msg % (record.id,record.pid) | 181 | msg = msg % (record.id,record.pid) |
| 179 | print msg | 182 | print msg |
| 180 | #sys.stderr.write(msg) | 183 | #sys.stderr.write(msg) |
| 184 | yield record | ||
| 181 | #exit() | 185 | #exit() |
| 182 | continue | 186 | continue |
| 183 | 187 | ||
| @@ -204,7 +208,7 @@ def cedf_test(stream,csize,number): | |||
| 204 | " that was not running\n") | 208 | " that was not running\n") |
| 205 | msg = msg % (record.id, record.pid) | 209 | msg = msg % (record.id, record.pid) |
| 206 | print msg | 210 | print msg |
| 207 | #sys.stderr.write(msg) | 211 | #sys.stderr.write(msg) |
| 208 | #exit() | 212 | #exit() |
| 209 | yield record | 213 | yield record |
| 210 | continue | 214 | continue |
| @@ -229,6 +233,7 @@ def cedf_test(stream,csize,number): | |||
| 229 | msg = msg % (record.id,record.pid) | 233 | msg = msg % (record.id,record.pid) |
| 230 | print msg | 234 | print msg |
| 231 | #sys.stderr.write(msg) | 235 | #sys.stderr.write(msg) |
| 236 | yield record | ||
| 232 | #exit() | 237 | #exit() |
| 233 | continue | 238 | continue |
| 234 | off_cpu[pos].is_blocked =True | 239 | off_cpu[pos].is_blocked =True |
| @@ -248,6 +253,7 @@ def cedf_test(stream,csize,number): | |||
| 248 | msg = msg % (record.id,record.pid) | 253 | msg = msg % (record.id,record.pid) |
| 249 | print msg | 254 | print msg |
| 250 | #sys.stderr.write(msg) | 255 | #sys.stderr.write(msg) |
| 256 | yield record | ||
| 251 | #exit() | 257 | #exit() |
| 252 | continue | 258 | continue |
| 253 | off_cpu[pos].is_blocked = False | 259 | off_cpu[pos].is_blocked = False |
| @@ -303,6 +309,7 @@ def cedf_test(stream,csize,number): | |||
| 303 | msg = msg % (record.id, record.pid) | 309 | msg = msg % (record.id, record.pid) |
| 304 | print msg | 310 | print msg |
| 305 | #sys.stderr.write(msg) | 311 | #sys.stderr.write(msg) |
| 312 | yield record | ||
| 306 | #exit() #Continue see if anything wrong | 313 | #exit() #Continue see if anything wrong |
| 307 | continue | 314 | continue |
| 308 | job = tasklet_off_cpu[pos] | 315 | job = tasklet_off_cpu[pos] |
| @@ -320,6 +327,7 @@ def cedf_test(stream,csize,number): | |||
| 320 | msg = msg % (record.id, record.pid) | 327 | msg = msg % (record.id, record.pid) |
| 321 | print msg | 328 | print msg |
| 322 | #sys.stderr.write(msg) | 329 | #sys.stderr.write(msg) |
| 330 | yield record | ||
| 323 | #exit() #Continue see if anything wrong | 331 | #exit() #Continue see if anything wrong |
| 324 | continue | 332 | continue |
| 325 | job = work_off_cpu[pos] | 333 | job = work_off_cpu[pos] |
| @@ -335,6 +343,7 @@ def cedf_test(stream,csize,number): | |||
| 335 | " that was not running\n") | 343 | " that was not running\n") |
| 336 | msg = msg % (record.id, record.pid) | 344 | msg = msg % (record.id, record.pid) |
| 337 | print msg | 345 | print msg |
| 346 | yield record | ||
| 338 | #sys.stderr.write(msg) | 347 | #sys.stderr.write(msg) |
| 339 | pos = _find_job(record,tasklet_off_cpu) | 348 | pos = _find_job(record,tasklet_off_cpu) |
| 340 | if pos is not None: | 349 | if pos is not None: |
| @@ -359,6 +368,7 @@ def cedf_test(stream,csize,number): | |||
| 359 | " that was not running\n") | 368 | " that was not running\n") |
| 360 | msg = msg % (record.id, record.pid) | 369 | msg = msg % (record.id, record.pid) |
| 361 | print msg | 370 | print msg |
| 371 | yield record | ||
| 362 | #sys.stderr.write(msg) | 372 | #sys.stderr.write(msg) |
| 363 | pos = _find_job(record,work_off_cpu) | 373 | pos = _find_job(record,work_off_cpu) |
| 364 | if pos is not None: | 374 | if pos is not None: |
| @@ -374,34 +384,38 @@ def cedf_test(stream,csize,number): | |||
| 374 | del work_on_cpu[pos] | 384 | del work_on_cpu[pos] |
| 375 | 385 | ||
| 376 | # a Task has Priority inheritance | 386 | # a Task has Priority inheritance |
| 377 | #elif record.type_name == 'eff_prio_change': | 387 | elif record.type_name == 'eff_prio_change': |
| 378 | # inh_pos = _find_inh_job(record,off_cpu) | 388 | inh_pos = _find_inh_job(record,off_cpu) |
| 379 | # if inh_pos is None: | 389 | if inh_pos is None: |
| 380 | # inh_pos = _find_inh_job(record,on_cpu) | 390 | inh_pos = _find_inh_job(record,on_cpu) |
| 381 | # if inh_pos is not None: | 391 | if inh_pos is not None: |
| 382 | # inh_job = on_cpu[inh_pos] | 392 | inh_job = on_cpu[inh_pos] |
| 383 | # else: | 393 | else: |
| 384 | # inh_job = off_cpu[inh_pos] | 394 | inh_job = off_cpu[inh_pos] |
| 385 | # | 395 | |
| 386 | # pos = _find_job(record,on_cpu) | 396 | pos = _find_job(record,on_cpu) |
| 387 | # if pos is None: | 397 | if pos is None: |
| 388 | # pos = _find_job(record,off_cpu) | 398 | pos = _find_job(record,off_cpu) |
| 389 | # if pos is None: | 399 | if pos is None: |
| 390 | # msg = ("Event %d tried to change a jobs priority " + | 400 | msg = ("Event %d tried to change a jobs %d priority " + |
| 391 | # " that cannot found\n") | 401 | " that cannot found\n") |
| 392 | # msg = msg % (record.id) | 402 | msg = msg % (record.id,record.pid) |
| 393 | # sys.stderr.write(msg) | 403 | print msg |
| 394 | # #exit() #Continue see if anything wrong | 404 | #sys.stderr.write(msg) |
| 395 | # continue | 405 | yield record |
| 396 | # if inh_pos is not None: | 406 | #exit() #Continue see if anything wrong |
| 397 | # off_cpu[pos].inh_deadline = inh_job.deadline | 407 | continue |
| 398 | # else: | 408 | if inh_pos is not None: |
| 399 | # off_cpu[pos].inh_deadline = off_cpu[pos].deadline | 409 | off_cpu[pos].base_deadline = off_cpu[pos].deadline |
| 400 | # else: | 410 | off_cpu[pos].deadline = inh_job.deadline |
| 401 | # if inh_pos is not None: | 411 | else: |
| 402 | # on_cpu[pos].inh_deadline = inh_job.deadline | 412 | off_cpu[pos].deadline = off_cpu[pos].base_deadline |
| 403 | # else: | 413 | else: |
| 404 | # on_cpu[pos].inh_deadline = on_cpu[pos].deadline | 414 | if inh_pos is not None: |
| 415 | on_cpu[pos].base_deadline = on_cpu[pos].deadline | ||
| 416 | on_cpu[pos].deadline = inh_job.deadline | ||
| 417 | else: | ||
| 418 | on_cpu[pos].deadline = on_cpu[pos].base_deadline | ||
| 405 | else: | 419 | else: |
| 406 | continue # illegal event | 420 | continue # illegal event |
| 407 | 421 | ||
| @@ -415,6 +429,11 @@ def cedf_test(stream,csize,number): | |||
| 415 | start_time = time.time() | 429 | start_time = time.time() |
| 416 | count = record.id | 430 | count = record.id |
| 417 | 431 | ||
| 432 | #stop processing once the record id exceeds number (the maximum) ||
| 433 | if record.id > number: | ||
| 434 | break | ||
| 435 | |||
| 436 | |||
| 418 | yield record | 437 | yield record |
| 419 | 438 | ||
| 420 | 439 | ||
| @@ -431,7 +450,7 @@ class Job(object): | |||
| 431 | self.cpu = record.cpu # for target cpu (-1: not assigned yet) | 450 | self.cpu = record.cpu # for target cpu (-1: not assigned yet) |
| 432 | self.cluster = -1 # for target cluster (-1: not assigned yet) | 451 | self.cluster = -1 # for target cluster (-1: not assigned yet) |
| 433 | self.deadline = None | 452 | self.deadline = None |
| 434 | #self.inh_deadline = None | 453 | self.base_deadline = None |
| 435 | self.exe_pid = -1 # handling klitthread (-1: not assigned yet) | 454 | self.exe_pid = -1 # handling klitthread (-1: not assigned yet) |
| 436 | self.is_complete = False | 455 | self.is_complete = False |
| 437 | self.is_blocked = False | 456 | self.is_blocked = False |
| @@ -439,10 +458,11 @@ class Job(object): | |||
| 439 | self.inversion_end = None | 458 | self.inversion_end = None |
| 440 | self.inversion_start_id = None | 459 | self.inversion_start_id = None |
| 441 | self.inversion_start_triggering_event_id = None | 460 | self.inversion_start_triggering_event_id = None |
| 442 | #self.simultaneous_start = None | 461 | |
| 443 | #self.simultaneous_end = None | 462 | self.simultaneous_start = None |
| 444 | #self.simultaneous_start_id = None | 463 | self.simultaneous_end = None |
| 445 | #self.simultaneous_start_triggering_event_id = None | 464 | self.simultaneous_start_id = None |
| 465 | self.simultaneous_start_triggering_event_id = None | ||
| 446 | 466 | ||
| 447 | def __str__(self): | 467 | def __str__(self): |
| 448 | return "(%d.%d:%d)" % (self.pid,self.job,self.deadline) | 468 | return "(%d.%d:%d)" % (self.pid,self.job,self.deadline) |
| @@ -470,24 +490,26 @@ class Error(object): | |||
| 470 | 490 | ||
| 471 | 491 | ||
| 472 | # C-EDF errors: the start or end of a simultaneous execution of owner and tasklet(work_item) | 492 | # C-EDF errors: the start or end of a simultaneous execution of owner and tasklet(work_item) |
| 473 | #class Error_simultaneous(object): | 493 | class Error_simultaneous(object): |
| 474 | # id = 0 | 494 | id = 0 |
| 475 | # def __init__(self, job, c, first_event_this_timestamp,irq_type): | 495 | def __init__(self, job, off_cpu, on_cpu,first_event_this_timestamp,irq_type): |
| 476 | # Error_simultaneous.id += 1 | 496 | Error_simultaneous.id += 1 |
| 477 | # self.error_type = "simultaneous" | 497 | self.error_type = "simultaneous" |
| 478 | # self.id = Error_simultaneous.id | 498 | self.id = Error_simultaneous.id |
| 479 | # self.job = copy.copy(job) | 499 | self.job = copy.copy(job) |
| 480 | # self.cluster = c | 500 | self.off_cpu = copy.copy(off_cpu) |
| 481 | # self.record_type = 'error' | 501 | self.on_cpu = copy.copy(on_cpu) |
| 482 | # self.triggering_event_id = first_event_this_timestamp | 502 | self.record_type = 'error' |
| 483 | # if job.simultaneous_end is None: | 503 | self.triggering_event_id = first_event_this_timestamp |
| 484 | # self.type_name = irq_type+'_simultaneous_start' | 504 | if job.simultaneous_end is None: |
| 485 | # job.simultaneous_start_id = self.id | 505 | self.type_name = irq_type+'_simultaneous_start' |
| 486 | # job.simultaneous_start_triggering_event_id = self.triggering_event_id | 506 | job.simultaneous_start_id = self.id |
| 487 | # else: | 507 | job.simultaneous_start_triggering_event_id = self.triggering_event_id |
| 488 | # self.type_name = irq_type+'_simultaneous_end' | 508 | else: |
| 489 | # self.simultaneous_start_id = job.simultaneous_start_id | 509 | self.type_name = irq_type+'_simultaneous_end' |
| 490 | # self.simultaneous_start_triggering_event_id = job.simultaneous_start_triggering_event_id | 510 | self.simultaneous_start_id = job.simultaneous_start_id |
| 511 | self.simultaneous_start_triggering_event_id = job.simultaneous_start_triggering_event_id | ||
| 512 | |||
| 491 | 513 | ||
| 492 | #insert into cluster map | 514 | #insert into cluster map |
| 493 | def _insert_cluster_map(job, cluster_map): | 515 | def _insert_cluster_map(job, cluster_map): |
| @@ -531,11 +553,11 @@ def _find_irq_carrier(job,list): | |||
| 531 | return None | 553 | return None |
| 532 | 554 | ||
| 533 | # Returns the position of an inherited Job in a list, or None | 555 | # Returns the position of an inherited Job in a list, or None |
| 534 | #def _find_inh_job(record,list): | 556 | def _find_inh_job(record,list): |
| 535 | # for i in range(0,len(list)): | 557 | for i in range(0,len(list)): |
| 536 | # if list[i].pid == record.inh_pid: | 558 | if list[i].pid == record.inh_pid: |
| 537 | # return i | 559 | return i |
| 538 | # return None | 560 | return None |
| 539 | 561 | ||
| 540 | # Return True if owner in off_cpu or on_cpu | 562 | # Return True if owner in off_cpu or on_cpu |
| 541 | def _is_owner(record,off_cpu,on_cpu): | 563 | def _is_owner(record,off_cpu,on_cpu): |
| @@ -630,8 +652,8 @@ def _cedf_check(c, | |||
| 630 | return errors | 652 | return errors |
| 631 | 653 | ||
| 632 | 654 | ||
| 633 | # Return records for any inversion_starts and inversion_ends | 655 | # Return records for any inversion_start and inversion_end |
| 634 | def _cedf_check_irq( | 656 | def _cedf_check_irq_inversion( |
| 635 | off_cpu,on_cpu, | 657 | off_cpu,on_cpu, |
| 636 | tasklet_on_cpu, work_on_cpu, | 658 | tasklet_on_cpu, work_on_cpu, |
| 637 | irq_off_cpu,irq_on_cpu, | 659 | irq_off_cpu,irq_on_cpu, |
| @@ -694,7 +716,7 @@ def _cedf_check_irq( | |||
| 694 | errors.append(Error(irq, off_cpu, on_cpu, | 716 | errors.append(Error(irq, off_cpu, on_cpu, |
| 695 | first_event_this_timestamp,irq_type)) | 717 | first_event_this_timestamp,irq_type)) |
| 696 | irq.inversion_start = None | 718 | irq.inversion_start = None |
| 697 | irq.inversion_end = None | 719 | irq.inversion_end = None |
| 698 | 720 | ||
| 699 | # have klitirqd take care of (already work_begin or tasklet_begin and assigned a klitthread) | 721 | # have klitirqd take care of (already work_begin or tasklet_begin and assigned a klitthread) |
| 700 | for irq in irq_on_cpu: | 722 | for irq in irq_on_cpu: |
| @@ -717,20 +739,22 @@ def _cedf_check_irq( | |||
| 717 | 739 | ||
| 718 | pos = _find_job(irq,all) | 740 | pos = _find_job(irq,all) |
| 719 | _on = _find_job(irq,on_cpu) | 741 | _on = _find_job(irq,on_cpu) |
| 742 | klit_pos = _find_irq_carrier(irq,on_cpu) | ||
| 720 | # owner task is completed (weird!!!) | 743 | # owner task is completed (weird!!!) |
| 721 | if pos is None and irq.inversion_start is not None: | 744 | if pos is None : |
| 722 | irq.inversion_end = when | 745 | if irq.inversion_start is not None: |
| 723 | errors.append(Error(irq, off_cpu, on_cpu, | 746 | irq.inversion_end = when |
| 724 | first_event_this_timestamp,irq_type)) | 747 | errors.append(Error(irq, off_cpu, on_cpu, |
| 725 | irq.inversion_start = None | 748 | first_event_this_timestamp,irq_type)) |
| 726 | irq.inversion_end = None | 749 | irq.inversion_start = None |
| 750 | irq.inversion_end = None | ||
| 751 | |||
| 727 | elif pos is not None: | 752 | elif pos is not None: |
| 728 | # look for priority of owner task | 753 | # look for priority of owner task |
| 729 | # owner task is m-priority task in the cluster | 754 | # owner task is m-priority task in the cluster |
| 730 | if pos in range(0,min(csize,len(all))): | 755 | if pos in range(0,min(csize,len(all))): |
| 731 | # owner task is not running | 756 | # owner task is not running |
| 732 | if _on is None: | 757 | if _on is None: |
| 733 | klit_pos = _find_irq_carrier(irq,on_cpu) | ||
| 734 | # if the klitirqd task is running and is not blocked | 758 | # if the klitirqd task is running and is not blocked |
| 735 | if klit_pos is not None and on_cpu[klit_pos].is_blocked is False: | 759 | if klit_pos is not None and on_cpu[klit_pos].is_blocked is False: |
| 736 | if irq.inversion_start is not None: | 760 | if irq.inversion_start is not None: |
| @@ -739,12 +763,16 @@ def _cedf_check_irq( | |||
| 739 | first_event_this_timestamp,irq_type)) | 763 | first_event_this_timestamp,irq_type)) |
| 740 | irq.inversion_start = None | 764 | irq.inversion_start = None |
| 741 | irq.inversion_end = None | 765 | irq.inversion_end = None |
| 742 | #if the klitirqd task is not running or blocked | 766 | |
| 767 | |||
| 768 | #if the klitirqd task is not running or blocked (Or in noirqd case) | ||
| 743 | else: | 769 | else: |
| 744 | if irq.inversion_start is None: | 770 | if irq.inversion_start is None: |
| 745 | irq.inversion_start = when | 771 | irq.inversion_start = when |
| 746 | errors.append(Error(irq, off_cpu, on_cpu, | 772 | errors.append(Error(irq, off_cpu, on_cpu, |
| 747 | first_event_this_timestamp,irq_type)) | 773 | first_event_this_timestamp,irq_type)) |
| 774 | |||
| 775 | |||
| 748 | else: | 776 | else: |
| 749 | # here is very weird situation because owner must be blocked | 777 | # here is very weird situation because owner must be blocked |
| 750 | if irq.inversion_start is not None: | 778 | if irq.inversion_start is not None: |
| @@ -754,116 +782,78 @@ def _cedf_check_irq( | |||
| 754 | irq.inversion_start = None | 782 | irq.inversion_start = None |
| 755 | irq.inversion_end = None | 783 | irq.inversion_end = None |
| 756 | 784 | ||
| 785 | |||
| 786 | |||
| 757 | # owner task is not m-priority task in the cluster | 787 | # owner task is not m-priority task in the cluster |
| 758 | elif pos in range(csize,len(all)): | 788 | elif pos in range(csize,len(all)): |
| 759 | if irq.inversion_start is not None: | 789 | if _on is None: |
| 760 | irq.inversion_end = when | 790 | # if the klitirqd task is running and is not blocked |
| 761 | errors.append(Error(irq, off_cpu, on_cpu, | 791 | if klit_pos is not None and on_cpu[klit_pos].is_blocked is False: |
| 762 | first_event_this_timestamp,irq_type)) | 792 | if irq.inversion_start is None: |
| 763 | irq.inversion_start = None | 793 | irq.inversion_start = when |
| 764 | irq.inversion_end = None | 794 | errors.append(Error(irq, off_cpu, on_cpu, |
| 795 | first_event_this_timestamp,irq_type)) | ||
| 796 | #if the klitirqd task is not running or blocked (Or in noirqd case) | ||
| 797 | else: | ||
| 798 | if irq.inversion_start is not None: | ||
| 799 | irq.inversion_end = when | ||
| 800 | errors.append(Error(irq, off_cpu, on_cpu, | ||
| 801 | first_event_this_timestamp,irq_type)) | ||
| 802 | irq.inversion_start = None | ||
| 803 | irq.inversion_end = None | ||
| 804 | |||
| 805 | |||
| 806 | else: | ||
| 807 | # here is very weird situation because owner must be blocked ||
| 808 | # don't count into tasklet inversion but task inversion | ||
| 809 | if irq.inversion_start is not None: | ||
| 810 | irq.inversion_end = when | ||
| 811 | errors.append(Error(irq, off_cpu, on_cpu, | ||
| 812 | first_event_this_timestamp,irq_type)) | ||
| 813 | irq.inversion_start = None | ||
| 814 | irq.inversion_end = None | ||
| 815 | |||
| 765 | return errors | 816 | return errors |
| 817 | |||
| 818 | # Return records for any simultaneous_start and simultaneous_end | ||
| 819 | def _cedf_check_irq_simultaneous( | ||
| 820 | off_cpu,on_cpu, | ||
| 821 | tasklet_on_cpu, work_on_cpu, | ||
| 822 | irq_off_cpu,irq_on_cpu, | ||
| 823 | when,csize, | ||
| 824 | first_event_this_timestamp, irq_type): | ||
| 766 | 825 | ||
| 826 | # List of error records to be returned | ||
| 827 | errors = [] | ||
| 828 | |||
| 829 | #Look at each irq in irq_off_cpu (not assigned a klitthread yet) ||
| 830 | for irq in irq_off_cpu: | ||
| 831 | if irq.simultaneous_start is not None: | ||
| 832 | irq.simultaneous_end = when | ||
| 833 | errors.append(Error_simultaneous(irq, off_cpu, on_cpu, | ||
| 834 | first_event_this_timestamp,irq_type)) | ||
| 835 | irq.simultaneous_start = None | ||
| 836 | irq.simultaneous_end = None | ||
| 767 | 837 | ||
| 768 | # Return records for any inversion_starts and inversion_ends | 838 | # have klitirqd take care of (already work_begin or tasklet_begin and assigned a klitthread) |
| 769 | #def _cedf_check_work_simultaneous(off_cpu,on_cpu, | 839 | for irq in irq_on_cpu: |
| 770 | # work_off_cpu,work_on_cpu, | 840 | _on = _find_job(irq,on_cpu) |
| 771 | # when,csize,first_event_this_timestamp): | 841 | klit_pos = _find_irq_carrier(irq,on_cpu) |
| 772 | # | 842 | if _on is None: |
| 773 | # # List of error records to be returned | 843 | if irq.simultaneous_start is not None: |
| 774 | # errors = [] | 844 | irq.simultaneous_end = when |
| 775 | # | 845 | errors.append(Error_simultaneous(irq, off_cpu, on_cpu, |
| 776 | # #Look for all work is work_off_cpu (not running) | 846 | first_event_this_timestamp,irq_type)) |
| 777 | # for work in work_off_cpu: | 847 | irq.simultaneous_start = None |
| 778 | # # find owner's cpu | 848 | irq.simultaneous_end = None |
| 779 | # owner_cpu = _get_cpu(work,off_cpu,on_cpu,"Work_Item") | 849 | else: |
| 780 | # if owner_cpu is None: | 850 | # here is very weird situation because owner must be blocked |
| 781 | # work_off_cpu.remove(work) | 851 | if klit_pos is not None and on_cpu[klit_pos].is_blocked is False: |
| 782 | # continue | 852 | if irq.simultaneous_start is None: |
| 783 | # | 853 | irq.simultaneous_start = when |
| 784 | # if work.simultaneous_start is not None: | 854 | errors.append(Error_simultaneous(irq, off_cpu, on_cpu, |
| 785 | # work.simultaneous_end = when | 855 | first_event_this_timestamp,irq_type)) |
| 786 | # errors.append(Error_simultaneous(tasklet, int(owner_cpu/csize), | 856 | |
| 787 | # first_event_this_timestamp,"Work_Item")) | 857 | return errors |
| 788 | # work.simultaneous_start = None | 858 | |
| 789 | # work.simultaneous_end = None | 859 | |
| 790 | # | ||
| 791 | # # have klitirqd take care of | ||
| 792 | # for work in work_on_cpu: | ||
| 793 | # # find owner's cpu | ||
| 794 | # owner_cpu = _get_cpu(work,off_cpu,on_cpu,"Work_Item") | ||
| 795 | # if owner_cpu is None: | ||
| 796 | # work_on_cpu.remove(work) | ||
| 797 | # continue | ||
| 798 | # klit_pos = _find_work_carrier(work,on_cpu) | ||
| 799 | # # if the klitirqd task is running and is not blocked | ||
| 800 | # if klit_pos is not None and on_cpu[klit_pos].is_blocked is False: | ||
| 801 | # _on = _find_job(work,on_cpu) | ||
| 802 | # # owner task is running | ||
| 803 | # if _on is not None and on_cpu[_on].is_complete is not True and on_cpu[_on].is_blocked is not True: | ||
| 804 | # if work.simultaneous_start is None: | ||
| 805 | # work.simultaneous_start = when | ||
| 806 | # errors.append(Error_simultaneous(work,int(owner_cpu/csize), | ||
| 807 | # first_event_this_timestamp,"Work_Item")) | ||
| 808 | # continue | ||
| 809 | # | ||
| 810 | # if work.simultaneous_start is not None: | ||
| 811 | # work.simultaneous_end = when | ||
| 812 | # errors.append(Error_simultaneous(work, int(owner_cpu/csize), | ||
| 813 | # first_event_this_timestamp,"Work_Item")) | ||
| 814 | # work.simultaneous_start = None | ||
| 815 | # work.simultaneous_end = None | ||
| 816 | # | ||
| 817 | # | ||
| 818 | # return errors | ||
| 819 | |||
| 820 | |||
| 821 | # Return records for any inversion_starts and inversion_ends | ||
| 822 | #def _cedf_check_tasklet_simultaneous(off_cpu,on_cpu, | ||
| 823 | # tasklet_off_cpu, tasklet_on_cpu, | ||
| 824 | # when,csize,first_event_this_timestamp): | ||
| 825 | # | ||
| 826 | # # List of error records to be returned | ||
| 827 | # errors = [] | ||
| 828 | # | ||
| 829 | # #Look for all tasklet is tasklet_off_cpu (not running) | ||
| 830 | # for tasklet in tasklet_off_cpu: | ||
| 831 | # # find owner's cpu | ||
| 832 | # owner_cpu = _get_cpu(tasklet,off_cpu,on_cpu,"Tasklet") | ||
| 833 | # if owner_cpu is None: | ||
| 834 | # tasklet_off_cpu.remove(tasklet) | ||
| 835 | # continue | ||
| 836 | # | ||
| 837 | # if tasklet.simultaneous_start is not None: | ||
| 838 | # tasklet.simultaneous_end = when | ||
| 839 | # errors.append(Error_simultaneous(tasklet, int(owner_cpu/csize), | ||
| 840 | # first_event_this_timestamp,"Tasklet")) | ||
| 841 | # tasklet.simultaneous_start = None | ||
| 842 | # tasklet.simultaneous_end = None | ||
| 843 | # | ||
| 844 | # | ||
| 845 | # #Look for all tasklet is tasklet_on_cpu (running) | ||
| 846 | # for tasklet in tasklet_on_cpu: | ||
| 847 | # # find owner's cpu | ||
| 848 | # owner_cpu = _get_cpu(tasklet,off_cpu,on_cpu,"Tasklet") | ||
| 849 | # if owner_cpu is None: | ||
| 850 | # tasklet_on_cpu.remove(tasklet) | ||
| 851 | # continue | ||
| 852 | # _on = _find_job(tasklet,on_cpu) | ||
| 853 | # # owner task is running | ||
| 854 | # if _on is not None and on_cpu[_on].is_complete is not True and on_cpu[_on].is_blocked is not True: | ||
| 855 | # if tasklet.simultaneous_start is None: | ||
| 856 | # tasklet.simultaneous_start = when | ||
| 857 | # errors.append(Error_simultaneous(tasklet,int(owner_cpu/csize), | ||
| 858 | # first_event_this_timestamp,"Tasklet")) | ||
| 859 | # else: | ||
| 860 | # if tasklet.simultaneous_start is not None: | ||
| 861 | # tasklet.simultaneous_end = when | ||
| 862 | # errors.append(Error_simultaneous(tasklet, int(owner_cpu/csize), | ||
| 863 | # first_event_this_timestamp,"Tasklet")) | ||
| 864 | # tasklet.simultaneous_start = None | ||
| 865 | # tasklet.simultaneous_end = None | ||
| 866 | # | ||
| 867 | # return errors | ||
| 868 | |||
| 869 | |||
diff --git a/unit_trace/stdout_printer.py b/unit_trace/stdout_printer.py index 2e4c783..111e0d1 100644 --- a/unit_trace/stdout_printer.py +++ b/unit_trace/stdout_printer.py | |||
| @@ -25,16 +25,14 @@ def stdout_printer(stream,csize): | |||
| 25 | elif record.record_type == "error" and record.type_name == 'Work_Item_inversion_end': | 25 | elif record.record_type == "error" and record.type_name == 'Work_Item_inversion_end': |
| 26 | _print_inversion_end(csize,record,"Work_Item_") | 26 | _print_inversion_end(csize,record,"Work_Item_") |
| 27 | 27 | ||
| 28 | #elif record.record_type == "error" and record.type_name == 'Tasklet_simultaneous_start': | 28 | elif record.record_type == "error" and record.type_name == 'Tasklet_simultaneous_start': |
| 29 | # _print_simultaneous_start(csize,record,"Tasklet_") | 29 | _print_simultaneous_start(csize,record,"Tasklet_") |
| 30 | #elif record.record_type == "error" and record.type_name == 'Tasklet_simultaneous_end': | 30 | elif record.record_type == "error" and record.type_name == 'Tasklet_simultaneous_end': |
| 31 | # _print_simultaneous_end(csize,record,"Tasklet_") | 31 | _print_simultaneous_end(csize,record,"Tasklet_") |
| 32 | #elif record.record_type == "error" and record.type_name == 'Work_Item_simultaneous_start': | 32 | elif record.record_type == "error" and record.type_name == 'Work_Item_simultaneous_start': |
| 33 | # _print_simultaneous_start(csize,record,"Work_Item_") | 33 | _print_simultaneous_start(csize,record,"Work_Item_") |
| 34 | #elif record.record_type == "error" and record.type_name == 'Work_Item_simultaneous_end': | 34 | elif record.record_type == "error" and record.type_name == 'Work_Item_simultaneous_end': |
| 35 | # _print_simultaneous_end(csize,record,"Work_Item_") | 35 | _print_simultaneous_end(csize,record,"Work_Item_") |
| 36 | |||
| 37 | |||
| 38 | else: | 36 | else: |
| 39 | continue | 37 | continue |
| 40 | print "" | 38 | print "" |
| @@ -103,25 +101,50 @@ def _print_inversion_end(csize,record,pi_type): | |||
| 103 | print str(job) + " ", | 101 | print str(job) + " ", |
| 104 | print #newline | 102 | print #newline |
| 105 | 103 | ||
| 106 | #def _print_simultaneous_start(csize,record,irq_type): | 104 | def _print_simultaneous_start(csize,record,irq_type): |
| 107 | # print "Type: %s" % (irq_type+"Simultaneous start") | 105 | print "Type: %s" % (irq_type+"Simultaneous start") |
| 108 | # print "Inversion Record IDs: (%d, U)" % (record.id) | 106 | print "Inversion Record IDs: (%d, U)" % (record.id) |
| 109 | # print "Triggering Event IDs: (%d, U)" % (record.triggering_event_id) | 107 | print "Triggering Event IDs: (%d, U)" % (record.triggering_event_id) |
| 110 | # print "Time: %d" % (record.job.simultaneous_start) | 108 | print "Time: %d" % (record.job.simultaneous_start) |
| 111 | # print "Job: %d.%d" % (record.job.pid,record.job.job) | 109 | print "Job: %d.%d" % (record.job.pid,record.job.job) |
| 110 | print "Deadline: %d" % (record.job.deadline) | ||
| 111 | print "Cluster: %d" % (record.job.cluster) | ||
| 112 | print "Handling Klit thread: %d" % (record.job.exe_pid) | ||
| 113 | print "Off CPU: ", | ||
| 114 | for job in record.off_cpu: | ||
| 115 | if job.cluster==record.job.cluster : | ||
| 116 | print str(job) + " ", | ||
| 117 | |||
| 118 | print "On CPU: ", | ||
| 119 | for job in record.on_cpu: | ||
| 120 | if job.cluster==record.job.cluster : | ||
| 121 | print str(job) + " ", | ||
| 122 | print #newline | ||
| 112 | 123 | ||
| 113 | 124 | ||
| 114 | #def _print_simultaneous_end(csize,record,irq_type): | 125 | def _print_simultaneous_end(csize,record,irq_type): |
| 115 | # print "Type: %s" % (irq_type+"Simultaneous end") | 126 | print "Type: %s" % (irq_type+"Simultaneous end") |
| 116 | # print "Inversion record IDs: (%d, %d)" % (record.simultaneous_start_id, | 127 | print "Inversion record IDs: (%d, %d)" % (record.simultaneous_start_id, |
| 117 | # record.id) | 128 | record.id) |
| 118 | # print("Triggering Event IDs: (%d, %d)" % | 129 | print("Triggering Event IDs: (%d, %d)" % |
| 119 | # (record.simultaneous_start_triggering_event_id, | 130 | (record.simultaneous_start_triggering_event_id, |
| 120 | # record.triggering_event_id)) | 131 | record.triggering_event_id)) |
| 121 | # print "Time: %d" % (record.job.simultaneous_end) | 132 | print "Time: %d" % (record.job.simultaneous_end) |
| 122 | # # NOTE: Here, we assume nanoseconds as the time unit. | 133 | # NOTE: Here, we assume nanoseconds as the time unit. |
| 123 | # # May have to be changed in the future. | 134 | # May have to be changed in the future. |
| 124 | # print "Duration: %f ms" % ( | 135 | print "Duration: %f ms" % ( |
| 125 | # float(record.job.simultaneous_end - record.job.simultaneous_start)/1000000) | 136 | float(record.job.simultaneous_end - record.job.simultaneous_start)/1000000) |
| 126 | # print "Job: %d.%d" % (record.job.pid,record.job.job) | 137 | print "Job: %d.%d" % (record.job.pid,record.job.job) |
| 127 | 138 | print "Deadline: %d" % (record.job.deadline) | |
| 139 | print "Cluster: %d" % (record.job.cluster) | ||
| 140 | print "Handling Klit thread: %d" % (record.job.exe_pid) | ||
| 141 | print "Off CPU: ", | ||
| 142 | for job in record.off_cpu: | ||
| 143 | if job.cluster==record.job.cluster : | ||
| 144 | print str(job) + " ", | ||
| 145 | |||
| 146 | print "On CPU: ", | ||
| 147 | for job in record.on_cpu: | ||
| 148 | if job.cluster==record.job.cluster : | ||
| 149 | print str(job) + " ", | ||
| 150 | print #newline | ||
