Diffstat (limited to 'litmus/rsm_lock.c')
-rw-r--r-- | litmus/rsm_lock.c | 222
1 file changed, 190 insertions, 32 deletions
diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c
index 3dfd8ae9d221..ae6dd3fb237b 100644
--- a/litmus/rsm_lock.c
+++ b/litmus/rsm_lock.c
@@ -5,6 +5,8 @@
 #include <litmus/sched_plugin.h>
 #include <litmus/rsm_lock.h>
 
+#include <litmus/litmus_proc.h>
+
 //#include <litmus/edf_common.h>
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
@@ -14,43 +16,43 @@
 
 /* caller is responsible for locking */
 static struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
 						     struct task_struct* skip)
 {
 	wait_queue_t *q;
 	struct list_head *pos;
 	struct task_struct *queued = NULL, *found = NULL;
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	dgl_wait_state_t *dgl_wait = NULL;
 #endif
 
 	list_for_each(pos, &mutex->wait.task_list) {
 		q = list_entry(pos, wait_queue_t, task_list);
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 		if(q->func == dgl_wake_up) {
 			dgl_wait = (dgl_wait_state_t*) q->private;
 			if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) {
 				queued = dgl_wait->task;
 			}
 			else {
 				queued = NULL;  // skip it.
 			}
 		}
 		else {
 			queued = (struct task_struct*) q->private;
 		}
 #else
 		queued = (struct task_struct*) q->private;
 #endif
 
 		/* Compare task prios, find high prio task. */
 		//if (queued && queued != skip && edf_higher_prio(queued, found)) {
 		if (queued && queued != skip && litmus->compare(queued, found)) {
 			found = queued;
 		}
 	}
 	return found;
 }
 
 
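The q->func check above is how a single wait queue carries two kinds of waiters: a plain waiter stores its task_struct in q->private, while a DGL waiter stores a dgl_wait_state_t and is tagged with dgl_wake_up as its wake function. A minimal sketch of how such an entry would be set up — the real init_dgl_waitqueue_entry() is not part of this diff, so this body is an assumption:

#include <linux/list.h>
#include <linux/wait.h>

/* Hypothetical sketch of init_dgl_waitqueue_entry(): ->private carries the
 * DGL wait state rather than a task, and ->func is set to dgl_wake_up,
 * which doubles as the type tag tested by rsm_mutex_find_hp_waiter(). */
static inline void init_dgl_waitqueue_entry_sketch(wait_queue_t *wq,
						   dgl_wait_state_t *dgl_wait)
{
	INIT_LIST_HEAD(&wq->task_list);
	wq->flags = 0;
	wq->private = dgl_wait;	/* payload: DGL wait state, not a task_struct */
	wq->func = dgl_wake_up;	/* wake callback doubles as the type tag */
}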
@@ -76,7 +78,8 @@ int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
 	BUG_ON(t != current);
 
 	if (mutex->owner) {
-		TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident);
+		TRACE_TASK(t, "Enqueuing on lock %d (held by %s/%d).\n",
+			   l->ident, mutex->owner->comm, mutex->owner->pid);
 
 		init_dgl_waitqueue_entry(wq_node, dgl_wait);
 
@@ -205,7 +208,8 @@ int rsm_mutex_lock(struct litmus_lock* l)
 	lock_fine_irqsave(&mutex->lock, flags);
 
 	if (mutex->owner) {
-		TRACE_TASK(t, "Blocking on lock %d.\n", l->ident);
+		TRACE_TASK(t, "Blocking on lock %d (held by %s/%d).\n",
+			   l->ident, mutex->owner->comm, mutex->owner->pid);
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 		// KLUDGE: don't count this suspension as time in the critical gpu
@@ -358,7 +362,7 @@ int rsm_mutex_unlock(struct litmus_lock* l)
 		top_priority(&tsk_rt(t)->hp_blocked_tasks);
 
 	if((new_max_eff_prio == NULL) ||
 	   /* there was a change in eff prio */
 	   ( (new_max_eff_prio != old_max_eff_prio) &&
 	   /* and owner had the old eff prio */
 	   (effective_priority(t) == old_max_eff_prio)) )
@@ -402,7 +406,7 @@ int rsm_mutex_unlock(struct litmus_lock* l)
 	if (next) {
 		/* next becomes the resource holder */
 		mutex->owner = next;
-		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+		TRACE_CUR("lock %d ownership passed to %s/%d\n", l->ident, next->comm, next->pid);
 
 		/* determine new hp_waiter if necessary */
 		if (next == mutex->hp_waiter) {
@@ -459,7 +463,7 @@ int rsm_mutex_unlock(struct litmus_lock* l)
 #endif
 
 	/* It is possible that 'next' *should* be the hp_waiter, but isn't
 	 * because that update hasn't yet executed (update operation is
 	 * probably blocked on mutex->lock).  So only inherit if the top of
 	 * 'next's top heap node is indeed the effective prio. of hp_waiter.
 	 * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
@@ -693,7 +697,7 @@ void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 		}
 
 		// beware: recursion
 		litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags);	// will unlock mutex->lock
 	}
 	else {
 		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
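The "beware: recursion" comment marks a hand-off convention: the caller enters litmus->nested_decrease_prio() still holding mutex->lock, and the callee is obliged to release it (typically after chaining to the next lock the owner is itself blocked on). A minimal sketch of that callee-unlocks shape, with all names hypothetical:

#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical illustration of the callee-unlocks discipline: 'to_unlock'
 * is the fine-grained lock the caller still holds; this function must
 * release it on every path before returning. */
static void nested_decrease_sketch(struct task_struct *owner,
				   struct task_struct *decreased_prio,
				   raw_spinlock_t *to_unlock,
				   unsigned long irqflags)
{
	/* ... recompute owner's effective priority here ... */

	raw_spin_unlock_irqrestore(to_unlock, irqflags);

	/* ... then, if owner is itself blocked on another lock, recurse on
	 *     that lock's propagate_decrease_inheritance() ... */
}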
@@ -754,8 +758,11 @@ int rsm_mutex_close(struct litmus_lock* l)
 	unlock_fine_irqrestore(&mutex->lock, flags);
 	unlock_global_irqrestore(dgl_lock, flags);
 
+	/*
+	TODO: Currently panics.  FIX THIS!
 	if (owner)
 		rsm_mutex_unlock(l);
+	*/
 
 	return 0;
 }
@@ -765,6 +772,154 @@ void rsm_mutex_free(struct litmus_lock* lock)
 	kfree(rsm_mutex_from_lock(lock));
 }
 
+
+/* The following may race if DGLs are enabled.  Only examine /proc if things
+   appear to be locked up.  TODO: FIX THIS!  Must find an elegant way to
+   transmit the DGL lock to this function. */
+static int rsm_proc_print(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct rsm_mutex *mutex = rsm_mutex_from_lock((struct litmus_lock*)data);
+
+	int attempts = 0;
+	const int max_attempts = 10;
+	int locked = 0;
+	unsigned long flags;
+
+	int size = count;
+	char *next = page;
+	int w;
+
+	while(attempts < max_attempts)
+	{
+		locked = raw_spin_trylock_irqsave(&mutex->lock, flags);
+
+		if (unlikely(!locked)) {
+			++attempts;
+			cpu_relax();
+		}
+		else {
+			break;
+		}
+	}
+
+	if (locked) {
+		w = scnprintf(next, size, "%s:\n", mutex->litmus_lock.name);
+		size -= w;
+		next += w;
+
+		w = scnprintf(next, size,
+			"owner: %s/%d (inh: %s/%d)\n",
+			(mutex->owner) ?
+				mutex->owner->comm : "nil",
+			(mutex->owner) ?
+				mutex->owner->pid : -1,
+			(mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
+				tsk_rt(mutex->owner)->inh_task->comm : "nil",
+			(mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
+				tsk_rt(mutex->owner)->inh_task->pid : -1);
+		size -= w;
+		next += w;
+
+		w = scnprintf(next, size,
+			"hp waiter: %s/%d (inh: %s/%d)\n",
+			(mutex->hp_waiter) ?
+				mutex->hp_waiter->comm : "nil",
+			(mutex->hp_waiter) ?
+				mutex->hp_waiter->pid : -1,
+			(mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
+				tsk_rt(mutex->hp_waiter)->inh_task->comm : "nil",
+			(mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
+				tsk_rt(mutex->hp_waiter)->inh_task->pid : -1);
+		size -= w;
+		next += w;
+
+		w = scnprintf(next, size, "\nblocked tasks, front to back:\n");
+		size -= w;
+		next += w;
+
+		if (waitqueue_active(&mutex->wait)) {
+			wait_queue_t *q;
+			struct list_head *pos;
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+			dgl_wait_state_t *dgl_wait = NULL;
+#endif
+			list_for_each(pos, &mutex->wait.task_list) {
+				struct task_struct *blocked_task;
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+				int enabled = 1;
+#endif
+				q = list_entry(pos, wait_queue_t, task_list);
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+				if(q->func == dgl_wake_up) {
+					dgl_wait = (dgl_wait_state_t*) q->private;
+					blocked_task = dgl_wait->task;
+
+					if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock)
+						enabled = 0;
+				}
+				else {
+					blocked_task = (struct task_struct*) q->private;
+				}
+#else
+				blocked_task = (struct task_struct*) q->private;
+#endif
+
+				w = scnprintf(next, size,
+					"\t%s/%d (inh: %s/%d)"
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+					" DGL enabled: %d"
+#endif
+					"\n",
+					blocked_task->comm, blocked_task->pid,
+					(tsk_rt(blocked_task)->inh_task) ?
+						tsk_rt(blocked_task)->inh_task->comm : "nil",
+					(tsk_rt(blocked_task)->inh_task) ?
+						tsk_rt(blocked_task)->inh_task->pid : -1
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+					, enabled
+#endif
+					);
+				size -= w;
+				next += w;
+			}
+		}
+		else {
+			w = scnprintf(next, size, "\t<NONE>\n");
+			size -= w;
+			next += w;
+		}
+
+		raw_spin_unlock_irqrestore(&mutex->lock, flags);
+	}
+	else {
+		w = scnprintf(next, size, "%s is busy.\n", mutex->litmus_lock.name);
+		size -= w;
+		next += w;
+	}
+
+	return count - size;
+}
+
+static void rsm_proc_add(struct litmus_lock* l)
+{
+	snprintf(l->name, LOCK_NAME_LEN, "rsm-%d", l->ident);
+
+	l->proc_entry = litmus_add_proc_lock(l, rsm_proc_print);
+}
+
+static void rsm_proc_remove(struct litmus_lock* l)
+{
+	litmus_remove_proc_lock(l);
+}
+
+static struct litmus_lock_proc_ops rsm_proc_ops =
+{
+	.add = rsm_proc_add,
+	.remove = rsm_proc_remove
+};
+
+
 struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
 {
 	struct rsm_mutex* mutex;
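rsm_proc_print() has the legacy read_proc_t signature (the pre-3.10 /proc API), so litmus_add_proc_lock() — which lives in litmus/litmus_proc.c and is not shown in this diff — presumably reduces to create_proc_read_entry(). A hedged sketch of that glue; the parent directory handle is an assumption:

#include <linux/proc_fs.h>

/* Assumed parent directory, e.g. /proc/litmus/locks; the real one would be
 * created in litmus/litmus_proc.c, which is not part of this diff. */
static struct proc_dir_entry *litmus_locks_dir;

/* Hypothetical sketch of litmus_add_proc_lock(): publish the lock under
 * its l->name ("rsm-<ident>") with the given read callback and the lock
 * itself as the callback's data pointer. */
struct proc_dir_entry* litmus_add_proc_lock_sketch(struct litmus_lock* l,
						   read_proc_t* func)
{
	return create_proc_read_entry(l->name, 0444, litmus_locks_dir,
				      func, l);
}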
@@ -772,6 +927,7 @@ struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
 	mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
 	if (!mutex)
 		return NULL;
+	memset(mutex, 0, sizeof(*mutex));
 
 	mutex->litmus_lock.ops = ops;
 	mutex->owner = NULL;
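Note that the kmalloc()/memset() pair added above is exactly what kzalloc() does in one call; a purely illustrative equivalent:

#include <linux/slab.h>
#include <litmus/rsm_lock.h>

/* Equivalent zeroed allocation in a single call. */
static struct rsm_mutex* rsm_mutex_alloc_sketch(void)
{
	return kzalloc(sizeof(struct rsm_mutex), GFP_KERNEL);
}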
@@ -791,6 +947,8 @@ struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
 
 	((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter;
 
+	((struct litmus_lock*)mutex)->proc = &rsm_proc_ops;
+
 	return &mutex->litmus_lock;
 }
 
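For reference, the scnprintf() format strings above produce /proc output of the following shape (the lock ident, task names, and PIDs here are invented for illustration):

rsm-3:
owner: rtspin/1234 (inh: nil/-1)
hp waiter: rtspin/1235 (inh: nil/-1)

blocked tasks, front to back:
	rtspin/1235 (inh: nil/-1)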