diff options
author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-06-04 22:45:57 -0400 |
---|---|---|
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-06-04 22:45:57 -0400 |
commit | 091c4dab6d83da85cb26eb13110589cd72374c14 (patch) | |
tree | adb2af4f486696e2fc89bbc99bba1848d77ef714 | |
parent | 5cc1791a31bed5268f27062e3d373a353b477017 (diff) |
Replace rwlock_t in sched_trace.c
In PreemptRT, rwlock_t can sleep and is not IRQ-safe.
To guarantee proper creation and deletion of the kfifo buffer, the vanilla
rwlock_t is replaced with a raw_spinlock and two atomic_t variables.
The spinlock only protects the atomic variables, so the kernel can use
blocking variants of the various locks in the kfifo implementation and in
the memory allocation.
-rw-r--r-- | litmus/sched_trace.c | 66 |
1 file changed, 50 insertions, 16 deletions
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c index f3943c16738d..8a9a7ddc4870 100644 --- a/litmus/sched_trace.c +++ b/litmus/sched_trace.c | |||
@@ -28,13 +28,18 @@ | |||
28 | /* Max length for one write --- from kernel --- to the buffer */ | 28 | /* Max length for one write --- from kernel --- to the buffer */ |
29 | #define MSG_SIZE 255 | 29 | #define MSG_SIZE 255 |
30 | 30 | ||
31 | /* Inner ring buffer structure */ | 31 | /* Inner ring buffer structure |
32 | * | ||
33 | * In PreemptRT we easily replace a rwlock_t (which may block here) with | ||
34 | * a raw_spinlock_t and two atomic_t | ||
35 | */ | ||
32 | typedef struct { | 36 | typedef struct { |
33 | /* in PREEMPT-RT rwlock_t can sleep, instead we need to access the | 37 | /* only used to protect the atomic ops */ |
34 | * buffer from atomic contexts. Changing rwlock_t in raw_spinlock_t | ||
35 | * adds an extra serialization, but it's probably unavoidable | ||
36 | */ | ||
37 | raw_spinlock_t del_lock; | 38 | raw_spinlock_t del_lock; |
39 | /* is the buffer initialized? */ | ||
40 | atomic_t init; | ||
41 | /* is the buffer in use? */ | ||
42 | atomic_t in_use; | ||
38 | 43 | ||
39 | /* the buffer */ | 44 | /* the buffer */ |
40 | struct kfifo kfifo; | 45 | struct kfifo kfifo; |
@@ -53,6 +58,8 @@ typedef struct { | |||
53 | */ | 58 | */ |
54 | void rb_init(ring_buffer_t* buf) | 59 | void rb_init(ring_buffer_t* buf) |
55 | { | 60 | { |
61 | atomic_set(&buf->init, 0); | ||
62 | atomic_set(&buf->in_use, 0); | ||
56 | raw_spin_lock_init(&buf->del_lock); | 63 | raw_spin_lock_init(&buf->del_lock); |
57 | } | 64 | } |
58 | 65 | ||
@@ -61,17 +68,21 @@ int rb_alloc_buf(ring_buffer_t* buf, unsigned int size) | |||
61 | unsigned long flags; | 68 | unsigned long flags; |
62 | int ret = 0; | 69 | int ret = 0; |
63 | 70 | ||
64 | raw_spin_lock_irqsave(&buf->del_lock, flags); | ||
65 | |||
66 | /* kfifo size must be a power of 2 | 71 | /* kfifo size must be a power of 2 |
67 | * atm kfifo alloc is automatically rounding the size | 72 | * atm kfifo alloc is automatically rounding the size |
68 | */ | 73 | */ |
69 | ret = kfifo_alloc(&buf->kfifo, size, GFP_ATOMIC); | 74 | ret = kfifo_alloc(&buf->kfifo, size, GFP_ATOMIC); |
70 | 75 | ||
71 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | 76 | if(ret < 0) { |
72 | 77 | ||
73 | if(ret < 0) | ||
74 | printk(KERN_ERR "kfifo_alloc failed\n"); | 78 | printk(KERN_ERR "kfifo_alloc failed\n"); |
79 | return ret; | ||
80 | } | ||
81 | |||
82 | /* now the buffer is allocated, others can access */ | ||
83 | raw_spin_lock_irqsave(&buf->del_lock, flags); | ||
84 | atomic_set(&buf->init, 1); | ||
85 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
75 | 86 | ||
76 | return ret; | 87 | return ret; |
77 | } | 88 | } |
@@ -80,20 +91,22 @@ int rb_free_buf(ring_buffer_t* buf) | |||
80 | { | 91 | { |
81 | unsigned long flags; | 92 | unsigned long flags; |
82 | 93 | ||
94 | /* be sure to synchronize with put / get */ | ||
83 | raw_spin_lock_irqsave(&buf->del_lock, flags); | 95 | raw_spin_lock_irqsave(&buf->del_lock, flags); |
96 | atomic_set(&buf->init, 0); | ||
97 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
98 | |||
99 | /* if someone started to use the buffer wait it ends */ | ||
100 | while(atomic_read(&buf->in_use)) | ||
101 | cpu_relax(); | ||
84 | 102 | ||
85 | BUG_ON(!kfifo_initialized(&buf->kfifo)); | 103 | BUG_ON(!kfifo_initialized(&buf->kfifo)); |
86 | kfifo_free(&buf->kfifo); | 104 | kfifo_free(&buf->kfifo); |
87 | 105 | ||
88 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
89 | |||
90 | return 0; | 106 | return 0; |
91 | } | 107 | } |
92 | 108 | ||
93 | /* | 109 | /* |
94 | * In preempt-rt, changing the rwlock_t in a raw_spinlock_t has the side | ||
95 | * effect to serialize writings. | ||
96 | * | ||
97 | * Will only succeed if there is enough space for all len bytes. | 110 | * Will only succeed if there is enough space for all len bytes. |
98 | */ | 111 | */ |
99 | int rb_put(ring_buffer_t* buf, char* mem, size_t len) | 112 | int rb_put(ring_buffer_t* buf, char* mem, size_t len) |
@@ -101,8 +114,18 @@ int rb_put(ring_buffer_t* buf, char* mem, size_t len) | |||
101 | unsigned long flags; | 114 | unsigned long flags; |
102 | int error = 0; | 115 | int error = 0; |
103 | 116 | ||
117 | /* be sure we see a free */ | ||
104 | raw_spin_lock_irqsave(&buf->del_lock, flags); | 118 | raw_spin_lock_irqsave(&buf->del_lock, flags); |
105 | 119 | ||
120 | /* if not init'ed yet, quit */ | ||
121 | if (!atomic_read(&buf->init)) { | ||
122 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
123 | goto out; | ||
124 | } | ||
125 | |||
126 | atomic_inc(&buf->in_use); | ||
127 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
128 | |||
106 | if (!kfifo_initialized(&buf->kfifo)) { | 129 | if (!kfifo_initialized(&buf->kfifo)) { |
107 | error = -ENODEV; | 130 | error = -ENODEV; |
108 | goto out; | 131 | goto out; |
@@ -113,8 +136,8 @@ int rb_put(ring_buffer_t* buf, char* mem, size_t len) | |||
113 | goto out; | 136 | goto out; |
114 | } | 137 | } |
115 | 138 | ||
139 | atomic_dec(&buf->in_use); | ||
116 | out: | 140 | out: |
117 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
118 | return error; | 141 | return error; |
119 | } | 142 | } |
120 | 143 | ||
@@ -127,7 +150,18 @@ int rb_get(ring_buffer_t* buf, char* mem, size_t len) | |||
127 | unsigned long flags; | 150 | unsigned long flags; |
128 | int error = 0; | 151 | int error = 0; |
129 | 152 | ||
153 | /* be sure we see a free */ | ||
130 | raw_spin_lock_irqsave(&buf->del_lock, flags); | 154 | raw_spin_lock_irqsave(&buf->del_lock, flags); |
155 | |||
156 | /* if not init'ed yet, quit */ | ||
157 | if (!atomic_read(&buf->init)) { | ||
158 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
159 | goto out; | ||
160 | } | ||
161 | |||
162 | atomic_inc(&buf->in_use); | ||
163 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
164 | |||
131 | if (!kfifo_initialized(&buf->kfifo)) { | 165 | if (!kfifo_initialized(&buf->kfifo)) { |
132 | error = -ENODEV; | 166 | error = -ENODEV; |
133 | goto out; | 167 | goto out; |
@@ -135,8 +169,8 @@ int rb_get(ring_buffer_t* buf, char* mem, size_t len) | |||
135 | 169 | ||
136 | error = kfifo_out(&buf->kfifo, (unsigned char*)mem, len); | 170 | error = kfifo_out(&buf->kfifo, (unsigned char*)mem, len); |
137 | 171 | ||
172 | atomic_dec(&buf->in_use); | ||
138 | out: | 173 | out: |
139 | raw_spin_unlock_irqrestore(&buf->del_lock, flags); | ||
140 | return error; | 174 | return error; |
141 | } | 175 | } |
142 | 176 | ||