Diffstat (limited to 'drivers/uwb/rsv.c')
-rw-r--r-- | drivers/uwb/rsv.c | 565
1 file changed, 437 insertions, 128 deletions
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index bae16204576d..ec6eecb32f30 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -15,23 +15,33 @@ | |||
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/uwb.h> | 19 | #include <linux/uwb.h> |
20 | #include <linux/random.h> | ||
21 | 21 | ||
22 | #include "uwb-internal.h" | 22 | #include "uwb-internal.h" |
23 | 23 | ||
24 | static void uwb_rsv_timer(unsigned long arg); | 24 | static void uwb_rsv_timer(unsigned long arg); |
25 | 25 | ||
26 | static const char *rsv_states[] = { | 26 | static const char *rsv_states[] = { |
27 | [UWB_RSV_STATE_NONE] = "none", | 27 | [UWB_RSV_STATE_NONE] = "none ", |
28 | [UWB_RSV_STATE_O_INITIATED] = "initiated", | 28 | [UWB_RSV_STATE_O_INITIATED] = "o initiated ", |
29 | [UWB_RSV_STATE_O_PENDING] = "pending", | 29 | [UWB_RSV_STATE_O_PENDING] = "o pending ", |
30 | [UWB_RSV_STATE_O_MODIFIED] = "modified", | 30 | [UWB_RSV_STATE_O_MODIFIED] = "o modified ", |
31 | [UWB_RSV_STATE_O_ESTABLISHED] = "established", | 31 | [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", |
32 | [UWB_RSV_STATE_T_ACCEPTED] = "accepted", | 32 | [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", |
33 | [UWB_RSV_STATE_T_DENIED] = "denied", | 33 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", |
34 | [UWB_RSV_STATE_T_PENDING] = "pending", | 34 | [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", |
35 | [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", | ||
36 | [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", | ||
37 | [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", | ||
38 | [UWB_RSV_STATE_T_PENDING] = "t pending ", | ||
39 | [UWB_RSV_STATE_T_DENIED] = "t denied ", | ||
40 | [UWB_RSV_STATE_T_RESIZED] = "t resized ", | ||
41 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", | ||
42 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", | ||
43 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", | ||
44 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", | ||
35 | }; | 45 | }; |
36 | 46 | ||
37 | static const char *rsv_types[] = { | 47 | static const char *rsv_types[] = { |
@@ -42,6 +52,31 @@ static const char *rsv_types[] = { | |||
42 | [UWB_DRP_TYPE_PCA] = "pca", | 52 | [UWB_DRP_TYPE_PCA] = "pca", |
43 | }; | 53 | }; |
44 | 54 | ||
55 | bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) | ||
56 | { | ||
57 | static const bool has_two_drp_ies[] = { | ||
58 | [UWB_RSV_STATE_O_INITIATED] = false, | ||
59 | [UWB_RSV_STATE_O_PENDING] = false, | ||
60 | [UWB_RSV_STATE_O_MODIFIED] = false, | ||
61 | [UWB_RSV_STATE_O_ESTABLISHED] = false, | ||
62 | [UWB_RSV_STATE_O_TO_BE_MOVED] = false, | ||
63 | [UWB_RSV_STATE_O_MOVE_COMBINING] = false, | ||
64 | [UWB_RSV_STATE_O_MOVE_REDUCING] = false, | ||
65 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, | ||
66 | [UWB_RSV_STATE_T_ACCEPTED] = false, | ||
67 | [UWB_RSV_STATE_T_CONFLICT] = false, | ||
68 | [UWB_RSV_STATE_T_PENDING] = false, | ||
69 | [UWB_RSV_STATE_T_DENIED] = false, | ||
70 | [UWB_RSV_STATE_T_RESIZED] = false, | ||
71 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, | ||
72 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, | ||
73 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, | ||
74 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, | ||
75 | }; | ||
76 | |||
77 | return has_two_drp_ies[rsv->state]; | ||
78 | } | ||
79 | |||
45 | /** | 80 | /** |
46 | * uwb_rsv_state_str - return a string for a reservation state | 81 | * uwb_rsv_state_str - return a string for a reservation state |
47 | * @state: the reservation state. | 82 | * @state: the reservation state. |
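The table added in the hunk above is what lets the DRP-IE writer know when a reservation carries a companion allocation in addition to its main one. A minimal, hypothetical consumer of the new helper (the demo_* name is not from this patch):

static int demo_count_drp_ies(struct uwb_rsv *rsv)
{
	/* move/expanding states advertise both the main and the companion
	 * MAS, so two DRP IEs go into the beacon instead of one */
	return uwb_rsv_has_two_drp_ies(rsv) ? 2 : 1;
}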
@@ -66,7 +101,7 @@ const char *uwb_rsv_type_str(enum uwb_drp_type type) | |||
66 | } | 101 | } |
67 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); | 102 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); |
68 | 103 | ||
69 | static void uwb_rsv_dump(struct uwb_rsv *rsv) | 104 | void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) |
70 | { | 105 | { |
71 | struct device *dev = &rsv->rc->uwb_dev.dev; | 106 | struct device *dev = &rsv->rc->uwb_dev.dev; |
72 | struct uwb_dev_addr devaddr; | 107 | struct uwb_dev_addr devaddr; |
@@ -82,6 +117,23 @@ static void uwb_rsv_dump(struct uwb_rsv *rsv) | |||
82 | dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); | 117 | dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); |
83 | } | 118 | } |
84 | 119 | ||
120 | static void uwb_rsv_release(struct kref *kref) | ||
121 | { | ||
122 | struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref); | ||
123 | |||
124 | kfree(rsv); | ||
125 | } | ||
126 | |||
127 | void uwb_rsv_get(struct uwb_rsv *rsv) | ||
128 | { | ||
129 | kref_get(&rsv->kref); | ||
130 | } | ||
131 | |||
132 | void uwb_rsv_put(struct uwb_rsv *rsv) | ||
133 | { | ||
134 | kref_put(&rsv->kref, uwb_rsv_release); | ||
135 | } | ||
136 | |||
85 | /* | 137 | /* |
86 | * Get a free stream index for a reservation. | 138 | * Get a free stream index for a reservation. |
87 | * | 139 | * |
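The kref added in this hunk replaces the old uwb_rsv_free(): the structure is now freed only when the last holder drops its reference, via uwb_rsv_release(). A sketch of the intended lifecycle, using only helpers and fields that appear in this patch (the surrounding function is illustrative, not part of it):

static void demo_rsv_lifecycle(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_create(rc, cb, pal_priv);  /* kref_init(): one ref, held by the creator */
	if (rsv == NULL)
		return;

	uwb_rsv_get(rsv);                        /* second ref for the rc->reservations list */
	list_add_tail(&rsv->rc_node, &rc->reservations);

	/* ... later, on removal and destruction ... */
	list_del_init(&rsv->rc_node);
	uwb_rsv_put(rsv);                        /* drop the list's reference */
	uwb_rsv_put(rsv);                        /* last ref: uwb_rsv_release() kfree()s the rsv */
}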
@@ -92,6 +144,7 @@ static void uwb_rsv_dump(struct uwb_rsv *rsv) | |||
92 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | 144 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) |
93 | { | 145 | { |
94 | struct uwb_rc *rc = rsv->rc; | 146 | struct uwb_rc *rc = rsv->rc; |
147 | struct device *dev = &rc->uwb_dev.dev; | ||
95 | unsigned long *streams_bm; | 148 | unsigned long *streams_bm; |
96 | int stream; | 149 | int stream; |
97 | 150 | ||
@@ -113,12 +166,15 @@ static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | |||
113 | rsv->stream = stream; | 166 | rsv->stream = stream; |
114 | set_bit(stream, streams_bm); | 167 | set_bit(stream, streams_bm); |
115 | 168 | ||
169 | dev_dbg(dev, "get stream %d\n", rsv->stream); | ||
170 | |||
116 | return 0; | 171 | return 0; |
117 | } | 172 | } |
118 | 173 | ||
119 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | 174 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) |
120 | { | 175 | { |
121 | struct uwb_rc *rc = rsv->rc; | 176 | struct uwb_rc *rc = rsv->rc; |
177 | struct device *dev = &rc->uwb_dev.dev; | ||
122 | unsigned long *streams_bm; | 178 | unsigned long *streams_bm; |
123 | 179 | ||
124 | switch (rsv->target.type) { | 180 | switch (rsv->target.type) { |
@@ -133,86 +189,52 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | |||
133 | } | 189 | } |
134 | 190 | ||
135 | clear_bit(rsv->stream, streams_bm); | 191 | clear_bit(rsv->stream, streams_bm); |
192 | |||
193 | dev_dbg(dev, "put stream %d\n", rsv->stream); | ||
136 | } | 194 | } |
137 | 195 | ||
138 | /* | 196 | void uwb_rsv_backoff_win_timer(unsigned long arg) |
139 | * Generate a MAS allocation with a single row component. | ||
140 | */ | ||
141 | static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, | ||
142 | int first_mas, int mas_per_zone, | ||
143 | int zs, int ze) | ||
144 | { | 197 | { |
145 | struct uwb_mas_bm col; | 198 | struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; |
146 | int z; | 199 | struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); |
147 | 200 | struct device *dev = &rc->uwb_dev.dev; | |
148 | bitmap_zero(mas->bm, UWB_NUM_MAS); | 201 | |
149 | bitmap_zero(col.bm, UWB_NUM_MAS); | 202 | bow->can_reserve_extra_mases = true; |
150 | bitmap_fill(col.bm, mas_per_zone); | 203 | if (bow->total_expired <= 4) { |
151 | bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 204 | bow->total_expired++; |
152 | 205 | } else { | |
153 | for (z = zs; z <= ze; z++) { | 206 | /* after 4 backoff window has expired we can exit from |
154 | bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); | 207 | /* after 4 backoff windows have expired we can exit from |
155 | bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 208 | bow->total_expired = 0; |
209 | bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; | ||
156 | } | 210 | } |
211 | dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); | ||
212 | |||
213 | /* try to relocate all the "to be moved" reservations */ ||
214 | uwb_rsv_handle_drp_avail_change(rc); | ||
157 | } | 215 | } |
158 | 216 | ||
159 | /* | 217 | void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) |
160 | * Allocate some MAS for this reservation based on current local | ||
161 | * availability, the reservation parameters (max_mas, min_mas, | ||
162 | * sparsity), and the WiMedia rules for MAS allocations. | ||
163 | * | ||
164 | * Returns -EBUSY is insufficient free MAS are available. | ||
165 | * | ||
166 | * FIXME: to simplify this, only safe reservations with a single row | ||
167 | * component in zones 1 to 15 are tried (zone 0 is skipped to avoid | ||
168 | * problems with the MAS reserved for the BP). | ||
169 | * | ||
170 | * [ECMA-368] section B.2. | ||
171 | */ | ||
172 | static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) | ||
173 | { | 218 | { |
174 | static const int safe_mas_in_row[UWB_NUM_ZONES] = { | 219 | struct uwb_drp_backoff_win *bow = &rc->bow; |
175 | 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, | 220 | struct device *dev = &rc->uwb_dev.dev; |
176 | }; | 221 | unsigned timeout_us; |
177 | int n, r; | ||
178 | struct uwb_mas_bm mas; | ||
179 | bool found = false; | ||
180 | 222 | ||
181 | /* | 223 | dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); |
182 | * Search all valid safe allocations until either: too few MAS | ||
183 | * are available; or the smallest allocation with sufficient | ||
184 | * MAS is found. | ||
185 | * | ||
186 | * The top of the zones are preferred, so space for larger | ||
187 | * allocations is available in the bottom of the zone (e.g., a | ||
188 | * 15 MAS allocation should start in row 14 leaving space for | ||
189 | * a 120 MAS allocation at row 0). | ||
190 | */ | ||
191 | for (n = safe_mas_in_row[0]; n >= 1; n--) { | ||
192 | int num_mas; | ||
193 | 224 | ||
194 | num_mas = n * (UWB_NUM_ZONES - 1); | 225 | bow->can_reserve_extra_mases = false; |
195 | if (num_mas < rsv->min_mas) | ||
196 | break; | ||
197 | if (found && num_mas < rsv->max_mas) | ||
198 | break; | ||
199 | 226 | ||
200 | for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { | 227 | if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) |
201 | if (safe_mas_in_row[r] < n) | 228 | return; |
202 | continue; | ||
203 | uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); | ||
204 | if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { | ||
205 | found = true; | ||
206 | break; | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | 229 | ||
211 | if (!found) | 230 | bow->window <<= 1; |
212 | return -EBUSY; | 231 | bow->n = random32() & (bow->window - 1); |
232 | dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n); ||
213 | 233 | ||
214 | bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); | 234 | /* reset the timer associated variables */ |
215 | return 0; | 235 | timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; |
236 | bow->total_expired = 0; | ||
237 | mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); | ||
216 | } | 238 | } |
217 | 239 | ||
218 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | 240 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) |
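The backoff logic in this hunk is plain power-of-two arithmetic: the window starts at UWB_DRP_BACKOFF_WIN_MIN >> 1, each call to the increment path doubles it until it would hit UWB_DRP_BACKOFF_WIN_MAX, a random slot n inside the window is drawn, and the timer fires n superframes later (one superframe is 65536 us); after four quiet expirations the window resets. A standalone model of that arithmetic; the DEMO_* minimum and maximum are placeholders, not the real constants, and rand() stands in for random32():

#include <stdio.h>
#include <stdlib.h>

#define DEMO_BACKOFF_WIN_MIN 16       /* placeholder, assumed power of two */
#define DEMO_BACKOFF_WIN_MAX 256      /* placeholder, assumed power of two */
#define DEMO_SUPERFRAME_US   65536    /* one superframe: 256 MAS x 256 us */

int main(void)
{
	unsigned window = DEMO_BACKOFF_WIN_MIN >> 1;   /* initial value, as in uwb_rsv_init() */

	/* each conflict doubles the window until it would reach the maximum,
	 * exactly like uwb_rsv_backoff_win_increment() above */
	while ((window << 1) != DEMO_BACKOFF_WIN_MAX) {
		unsigned n;

		window <<= 1;
		n = rand() & (window - 1);             /* random slot in [0, window) */
		printf("window=%u slot=%u timeout=%uus\n",
		       window, n, n * DEMO_SUPERFRAME_US);
	}
	return 0;
}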
@@ -225,13 +247,16 @@ static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | |||
225 | * received. | 247 | * received. |
226 | */ | 248 | */ |
227 | if (rsv->is_multicast) { | 249 | if (rsv->is_multicast) { |
228 | if (rsv->state == UWB_RSV_STATE_O_INITIATED) | 250 | if (rsv->state == UWB_RSV_STATE_O_INITIATED |
251 | || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING | ||
252 | || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING | ||
253 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) | ||
229 | sframes = 1; | 254 | sframes = 1; |
230 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) | 255 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) |
231 | sframes = 0; | 256 | sframes = 0; |
257 | |||
232 | } | 258 | } |
233 | 259 | ||
234 | rsv->expired = false; | ||
235 | if (sframes > 0) { | 260 | if (sframes > 0) { |
236 | /* | 261 | /* |
237 | * Add an additional 2 superframes to account for the | 262 | * Add an additional 2 superframes to account for the |
@@ -253,7 +278,7 @@ static void uwb_rsv_state_update(struct uwb_rsv *rsv, | |||
253 | rsv->state = new_state; | 278 | rsv->state = new_state; |
254 | rsv->ie_valid = false; | 279 | rsv->ie_valid = false; |
255 | 280 | ||
256 | uwb_rsv_dump(rsv); | 281 | uwb_rsv_dump("SU", rsv); |
257 | 282 | ||
258 | uwb_rsv_stroke_timer(rsv); | 283 | uwb_rsv_stroke_timer(rsv); |
259 | uwb_rsv_sched_update(rsv->rc); | 284 | uwb_rsv_sched_update(rsv->rc); |
@@ -267,10 +292,17 @@ static void uwb_rsv_callback(struct uwb_rsv *rsv) | |||
267 | 292 | ||
268 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | 293 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) |
269 | { | 294 | { |
295 | struct uwb_rsv_move *mv = &rsv->mv; | ||
296 | |||
270 | if (rsv->state == new_state) { | 297 | if (rsv->state == new_state) { |
271 | switch (rsv->state) { | 298 | switch (rsv->state) { |
272 | case UWB_RSV_STATE_O_ESTABLISHED: | 299 | case UWB_RSV_STATE_O_ESTABLISHED: |
300 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
301 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
302 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
273 | case UWB_RSV_STATE_T_ACCEPTED: | 303 | case UWB_RSV_STATE_T_ACCEPTED: |
304 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
305 | case UWB_RSV_STATE_T_RESIZED: | ||
274 | case UWB_RSV_STATE_NONE: | 306 | case UWB_RSV_STATE_NONE: |
275 | uwb_rsv_stroke_timer(rsv); | 307 | uwb_rsv_stroke_timer(rsv); |
276 | break; | 308 | break; |
@@ -282,10 +314,10 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
282 | return; | 314 | return; |
283 | } | 315 | } |
284 | 316 | ||
317 | uwb_rsv_dump("SC", rsv); | ||
318 | |||
285 | switch (new_state) { | 319 | switch (new_state) { |
286 | case UWB_RSV_STATE_NONE: | 320 | case UWB_RSV_STATE_NONE: |
287 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
288 | uwb_rsv_put_stream(rsv); | ||
289 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); | 321 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); |
290 | uwb_rsv_callback(rsv); | 322 | uwb_rsv_callback(rsv); |
291 | break; | 323 | break; |
@@ -295,12 +327,45 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
295 | case UWB_RSV_STATE_O_PENDING: | 327 | case UWB_RSV_STATE_O_PENDING: |
296 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); | 328 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); |
297 | break; | 329 | break; |
330 | case UWB_RSV_STATE_O_MODIFIED: | ||
331 | /* the companion bitmap holds the MASes to drop */ ||
332 | bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
333 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
334 | break; | ||
298 | case UWB_RSV_STATE_O_ESTABLISHED: | 335 | case UWB_RSV_STATE_O_ESTABLISHED: |
336 | if (rsv->state == UWB_RSV_STATE_O_MODIFIED | ||
337 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { | ||
338 | uwb_drp_avail_release(rsv->rc, &mv->companion_mas); | ||
339 | rsv->needs_release_companion_mas = false; | ||
340 | } | ||
299 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 341 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
300 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 342 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); |
301 | uwb_rsv_callback(rsv); | 343 | uwb_rsv_callback(rsv); |
302 | break; | 344 | break; |
345 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
346 | rsv->needs_release_companion_mas = true; | ||
347 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
348 | break; | ||
349 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
350 | rsv->needs_release_companion_mas = false; | ||
351 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
352 | bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
353 | rsv->mas.safe += mv->companion_mas.safe; | ||
354 | rsv->mas.unsafe += mv->companion_mas.unsafe; | ||
355 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
356 | break; | ||
357 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
358 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
359 | rsv->needs_release_companion_mas = true; | ||
360 | rsv->mas.safe = mv->final_mas.safe; | ||
361 | rsv->mas.unsafe = mv->final_mas.unsafe; | ||
362 | bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
363 | bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); | ||
364 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
365 | break; | ||
303 | case UWB_RSV_STATE_T_ACCEPTED: | 366 | case UWB_RSV_STATE_T_ACCEPTED: |
367 | case UWB_RSV_STATE_T_RESIZED: | ||
368 | rsv->needs_release_companion_mas = false; | ||
304 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 369 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
305 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); | 370 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); |
306 | uwb_rsv_callback(rsv); | 371 | uwb_rsv_callback(rsv); |
@@ -308,12 +373,82 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
308 | case UWB_RSV_STATE_T_DENIED: | 373 | case UWB_RSV_STATE_T_DENIED: |
309 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); | 374 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); |
310 | break; | 375 | break; |
376 | case UWB_RSV_STATE_T_CONFLICT: | ||
377 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
378 | break; | ||
379 | case UWB_RSV_STATE_T_PENDING: | ||
380 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); | ||
381 | break; | ||
382 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
383 | rsv->needs_release_companion_mas = true; | ||
384 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
385 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); | ||
386 | break; | ||
311 | default: | 387 | default: |
312 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", | 388 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", |
313 | uwb_rsv_state_str(new_state), new_state); | 389 | uwb_rsv_state_str(new_state), new_state); |
314 | } | 390 | } |
315 | } | 391 | } |
316 | 392 | ||
393 | static void uwb_rsv_handle_timeout_work(struct work_struct *work) | ||
394 | { | ||
395 | struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, | ||
396 | handle_timeout_work); | ||
397 | struct uwb_rc *rc = rsv->rc; | ||
398 | |||
399 | mutex_lock(&rc->rsvs_mutex); | ||
400 | |||
401 | uwb_rsv_dump("TO", rsv); | ||
402 | |||
403 | switch (rsv->state) { | ||
404 | case UWB_RSV_STATE_O_INITIATED: | ||
405 | if (rsv->is_multicast) { | ||
406 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
407 | goto unlock; | ||
408 | } | ||
409 | break; | ||
410 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
411 | if (rsv->is_multicast) { | ||
412 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
413 | goto unlock; | ||
414 | } | ||
415 | break; | ||
416 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
417 | if (rsv->is_multicast) { | ||
418 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
419 | goto unlock; | ||
420 | } | ||
421 | break; | ||
422 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
423 | if (rsv->is_multicast) { | ||
424 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
425 | goto unlock; | ||
426 | } | ||
427 | break; | ||
428 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
429 | if (rsv->is_multicast) | ||
430 | goto unlock; | ||
431 | break; | ||
432 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
433 | /* | ||
434 | * The time out could be for the main or of the | ||
435 | * companion DRP, assume it's for the companion and | ||
436 | * drop that first. A further time out is required to | ||
437 | * drop the main. | ||
438 | */ | ||
439 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
440 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
441 | goto unlock; | ||
442 | default: | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | uwb_rsv_remove(rsv); | ||
447 | |||
448 | unlock: | ||
449 | mutex_unlock(&rc->rsvs_mutex); | ||
450 | } | ||
451 | |||
317 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | 452 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) |
318 | { | 453 | { |
319 | struct uwb_rsv *rsv; | 454 | struct uwb_rsv *rsv; |
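uwb_rsv_handle_timeout_work() above runs from the rsv workqueue so that it can take rc->rsvs_mutex; the timer callback itself (uwb_rsv_timer(), further down in this patch) only queues this work, because timers run in softirq context and may not sleep. The same pattern in isolation, with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_obj {                       /* hypothetical container */
	struct timer_list timer;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct mutex lock;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, work);

	mutex_lock(&obj->lock);         /* sleeping is allowed in a workqueue */
	/* ... handle the timeout, e.g. drive a state machine ... */
	mutex_unlock(&obj->lock);
}

static void demo_timer_fn(unsigned long arg)
{
	struct demo_obj *obj = (struct demo_obj *)arg;

	queue_work(obj->wq, &obj->work);   /* softirq context: defer, never sleep */
}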
@@ -324,23 +459,17 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | |||
324 | 459 | ||
325 | INIT_LIST_HEAD(&rsv->rc_node); | 460 | INIT_LIST_HEAD(&rsv->rc_node); |
326 | INIT_LIST_HEAD(&rsv->pal_node); | 461 | INIT_LIST_HEAD(&rsv->pal_node); |
462 | kref_init(&rsv->kref); | ||
327 | init_timer(&rsv->timer); | 463 | init_timer(&rsv->timer); |
328 | rsv->timer.function = uwb_rsv_timer; | 464 | rsv->timer.function = uwb_rsv_timer; |
329 | rsv->timer.data = (unsigned long)rsv; | 465 | rsv->timer.data = (unsigned long)rsv; |
330 | 466 | ||
331 | rsv->rc = rc; | 467 | rsv->rc = rc; |
468 | INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); | ||
332 | 469 | ||
333 | return rsv; | 470 | return rsv; |
334 | } | 471 | } |
335 | 472 | ||
336 | static void uwb_rsv_free(struct uwb_rsv *rsv) | ||
337 | { | ||
338 | uwb_dev_put(rsv->owner); | ||
339 | if (rsv->target.type == UWB_RSV_TARGET_DEV) | ||
340 | uwb_dev_put(rsv->target.dev); | ||
341 | kfree(rsv); | ||
342 | } | ||
343 | |||
344 | /** | 473 | /** |
345 | * uwb_rsv_create - allocate and initialize a UWB reservation structure | 474 | * uwb_rsv_create - allocate and initialize a UWB reservation structure |
346 | * @rc: the radio controller | 475 | * @rc: the radio controller |
@@ -371,26 +500,36 @@ EXPORT_SYMBOL_GPL(uwb_rsv_create); | |||
371 | 500 | ||
372 | void uwb_rsv_remove(struct uwb_rsv *rsv) | 501 | void uwb_rsv_remove(struct uwb_rsv *rsv) |
373 | { | 502 | { |
503 | uwb_rsv_dump("RM", rsv); | ||
504 | |||
374 | if (rsv->state != UWB_RSV_STATE_NONE) | 505 | if (rsv->state != UWB_RSV_STATE_NONE) |
375 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 506 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
507 | |||
508 | if (rsv->needs_release_companion_mas) | ||
509 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
510 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
511 | |||
512 | if (uwb_rsv_is_owner(rsv)) | ||
513 | uwb_rsv_put_stream(rsv); | ||
514 | |||
376 | del_timer_sync(&rsv->timer); | 515 | del_timer_sync(&rsv->timer); |
377 | list_del(&rsv->rc_node); | 516 | uwb_dev_put(rsv->owner); |
378 | uwb_rsv_free(rsv); | 517 | if (rsv->target.type == UWB_RSV_TARGET_DEV) |
518 | uwb_dev_put(rsv->target.dev); | ||
519 | |||
520 | list_del_init(&rsv->rc_node); | ||
521 | uwb_rsv_put(rsv); | ||
379 | } | 522 | } |
380 | 523 | ||
381 | /** | 524 | /** |
382 | * uwb_rsv_destroy - free a UWB reservation structure | 525 | * uwb_rsv_destroy - free a UWB reservation structure |
383 | * @rsv: the reservation to free | 526 | * @rsv: the reservation to free |
384 | * | 527 | * |
385 | * The reservation will be terminated if it is pending or established. | 528 | * The reservation must already be terminated. |
386 | */ | 529 | */ |
387 | void uwb_rsv_destroy(struct uwb_rsv *rsv) | 530 | void uwb_rsv_destroy(struct uwb_rsv *rsv) |
388 | { | 531 | { |
389 | struct uwb_rc *rc = rsv->rc; | 532 | uwb_rsv_put(rsv); |
390 | |||
391 | mutex_lock(&rc->rsvs_mutex); | ||
392 | uwb_rsv_remove(rsv); | ||
393 | mutex_unlock(&rc->rsvs_mutex); | ||
394 | } | 533 | } |
395 | EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | 534 | EXPORT_SYMBOL_GPL(uwb_rsv_destroy); |
396 | 535 | ||
@@ -399,7 +538,7 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
399 | * @rsv: the reservation | 538 | * @rsv: the reservation |
400 | * | 539 | * |
401 | * The PAL should fill in @rsv's owner, target, type, max_mas, | 540 | * The PAL should fill in @rsv's owner, target, type, max_mas, |
402 | * min_mas, sparsity and is_multicast fields. If the target is a | 541 | * min_mas, max_interval and is_multicast fields. If the target is a |
403 | * uwb_dev it must be referenced. | 542 | * uwb_dev it must be referenced. |
404 | * | 543 | * |
405 | * The reservation's callback will be called when the reservation is | 544 | * The reservation's callback will be called when the reservation is |
@@ -408,20 +547,32 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
408 | int uwb_rsv_establish(struct uwb_rsv *rsv) | 547 | int uwb_rsv_establish(struct uwb_rsv *rsv) |
409 | { | 548 | { |
410 | struct uwb_rc *rc = rsv->rc; | 549 | struct uwb_rc *rc = rsv->rc; |
550 | struct uwb_mas_bm available; | ||
411 | int ret; | 551 | int ret; |
412 | 552 | ||
413 | mutex_lock(&rc->rsvs_mutex); | 553 | mutex_lock(&rc->rsvs_mutex); |
414 | |||
415 | ret = uwb_rsv_get_stream(rsv); | 554 | ret = uwb_rsv_get_stream(rsv); |
416 | if (ret) | 555 | if (ret) |
417 | goto out; | 556 | goto out; |
418 | 557 | ||
419 | ret = uwb_rsv_alloc_mas(rsv); | 558 | rsv->tiebreaker = random32() & 1; |
420 | if (ret) { | 559 | /* get available mas bitmap */ |
560 | uwb_drp_available(rc, &available); | ||
561 | |||
562 | ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); | ||
563 | if (ret == UWB_RSV_ALLOC_NOT_FOUND) { | ||
564 | ret = -EBUSY; | ||
565 | uwb_rsv_put_stream(rsv); | ||
566 | goto out; | ||
567 | } | ||
568 | |||
569 | ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); | ||
570 | if (ret != 0) { | ||
421 | uwb_rsv_put_stream(rsv); | 571 | uwb_rsv_put_stream(rsv); |
422 | goto out; | 572 | goto out; |
423 | } | 573 | } |
424 | 574 | ||
575 | uwb_rsv_get(rsv); | ||
425 | list_add_tail(&rsv->rc_node, &rc->reservations); | 576 | list_add_tail(&rsv->rc_node, &rc->reservations); |
426 | rsv->owner = &rc->uwb_dev; | 577 | rsv->owner = &rc->uwb_dev; |
427 | uwb_dev_get(rsv->owner); | 578 | uwb_dev_get(rsv->owner); |
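Per the kernel-doc above, a PAL fills in the reservation parameters and then calls uwb_rsv_establish(); the outcome is reported through the callback. A hedged sketch of such a caller; the peer argument, MAS numbers and callback body are made-up examples, only the uwb_rsv_* API and field names come from this file:

static void demo_rsv_cb(struct uwb_rsv *rsv)
{
	/* called when the reservation is established, modified or terminated */
}

static int demo_reserve(struct uwb_rc *rc, struct uwb_dev *peer)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_create(rc, demo_rsv_cb, NULL);
	if (rsv == NULL)
		return -ENOMEM;

	rsv->target.type  = UWB_RSV_TARGET_DEV;
	rsv->target.dev   = peer;           /* must hold a reference to peer */
	rsv->type         = UWB_DRP_TYPE_HARD;
	rsv->max_mas      = 128;            /* example values */
	rsv->min_mas      = 16;
	rsv->max_interval = 2;
	rsv->is_multicast = false;

	return uwb_rsv_establish(rsv);      /* 0 on success; callback follows */
}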
@@ -437,16 +588,71 @@ EXPORT_SYMBOL_GPL(uwb_rsv_establish); | |||
437 | * @rsv: the reservation to modify | 588 | * @rsv: the reservation to modify |
438 | * @max_mas: new maximum MAS to reserve | 589 | * @max_mas: new maximum MAS to reserve |
439 | * @min_mas: new minimum MAS to reserve | 590 | * @min_mas: new minimum MAS to reserve |
440 | * @sparsity: new sparsity to use | 591 | * @max_interval: new max_interval to use |
441 | * | 592 | * |
442 | * FIXME: implement this once there are PALs that use it. | 593 | * FIXME: implement this once there are PALs that use it. |
443 | */ | 594 | */ |
444 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) | 595 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) |
445 | { | 596 | { |
446 | return -ENOSYS; | 597 | return -ENOSYS; |
447 | } | 598 | } |
448 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); | 599 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); |
449 | 600 | ||
601 | /* | ||
602 | * move an already established reservation (rc->rsvs_mutex must be ||
603 | * taken when this function is called) ||
604 | */ | ||
605 | int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) | ||
606 | { | ||
607 | struct uwb_rc *rc = rsv->rc; | ||
608 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
609 | struct device *dev = &rc->uwb_dev.dev; | ||
610 | struct uwb_rsv_move *mv; | ||
611 | int ret = 0; | ||
612 | |||
613 | if (bow->can_reserve_extra_mases == false) | ||
614 | return -EBUSY; | ||
615 | |||
616 | mv = &rsv->mv; | ||
617 | |||
618 | if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { | ||
619 | |||
620 | if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { | ||
621 | /* We want to move the reservation */ | ||
622 | bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
623 | uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); | ||
624 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
625 | } | ||
626 | } else { | ||
627 | dev_dbg(dev, "new allocation not found\n"); | ||
628 | } | ||
629 | |||
630 | return ret; | ||
631 | } | ||
632 | |||
633 | /* Try to move every reservation in state O_ESTABLISHED, giving the ||
634 | * MAS allocator an availability that is the real availability plus ||
635 | * the MAS already allocated to the reservation. */ ||
636 | void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) | ||
637 | { | ||
638 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
639 | struct uwb_rsv *rsv; | ||
640 | struct uwb_mas_bm mas; | ||
641 | |||
642 | if (bow->can_reserve_extra_mases == false) | ||
643 | return; | ||
644 | |||
645 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
646 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || | ||
647 | rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { | ||
648 | uwb_drp_available(rc, &mas); | ||
649 | bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
650 | uwb_rsv_try_move(rsv, &mas); | ||
651 | } | ||
652 | } | ||
653 | |||
654 | } | ||
655 | |||
450 | /** | 656 | /** |
451 | * uwb_rsv_terminate - terminate an established reservation | 657 | * uwb_rsv_terminate - terminate an established reservation |
452 | * @rsv: the reservation to terminate | 658 | * @rsv: the reservation to terminate |
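The relocation helpers in this hunk rely on one key trick: before uwb_rsv_try_move() is called, the MAS a reservation already owns are OR'd back into the availability, so the allocator may keep reusing those slots for the moved reservation. A toy standalone model of that set operation, using an 8-bit map instead of the real UWB_NUM_MAS bitmap:

#include <stdio.h>

int main(void)
{
	unsigned char global_free = 0x0f;               /* MAS 0-3 currently free    */
	unsigned char rsv_own     = 0x30;               /* this reservation owns 4-5 */
	unsigned char offered     = global_free | rsv_own;

	/* the allocator may place the moved reservation anywhere in `offered',
	 * including on top of the slots it already holds */
	printf("allocator is offered 0x%02x rather than 0x%02x\n",
	       offered, global_free);
	return 0;
}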
@@ -463,7 +669,8 @@ void uwb_rsv_terminate(struct uwb_rsv *rsv) | |||
463 | 669 | ||
464 | mutex_lock(&rc->rsvs_mutex); | 670 | mutex_lock(&rc->rsvs_mutex); |
465 | 671 | ||
466 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 672 | if (rsv->state != UWB_RSV_STATE_NONE) |
673 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | ||
467 | 674 | ||
468 | mutex_unlock(&rc->rsvs_mutex); | 675 | mutex_unlock(&rc->rsvs_mutex); |
469 | } | 676 | } |
@@ -477,9 +684,14 @@ EXPORT_SYMBOL_GPL(uwb_rsv_terminate); | |||
477 | * | 684 | * |
478 | * Reservation requests from peers are denied unless a PAL accepts them | 685 | * Reservation requests from peers are denied unless a PAL accepts them |
479 | * by calling this function. | 686 | * by calling this function. |
687 | * | ||
688 | * The PAL must call uwb_rsv_destroy() for all accepted reservations before ||
689 | * calling uwb_pal_unregister(). | ||
480 | */ | 690 | */ |
481 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) | 691 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) |
482 | { | 692 | { |
693 | uwb_rsv_get(rsv); | ||
694 | |||
483 | rsv->callback = cb; | 695 | rsv->callback = cb; |
484 | rsv->pal_priv = pal_priv; | 696 | rsv->pal_priv = pal_priv; |
485 | rsv->state = UWB_RSV_STATE_T_ACCEPTED; | 697 | rsv->state = UWB_RSV_STATE_T_ACCEPTED; |
@@ -530,9 +742,9 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
530 | uwb_dev_get(rsv->owner); | 742 | uwb_dev_get(rsv->owner); |
531 | rsv->target.type = UWB_RSV_TARGET_DEV; | 743 | rsv->target.type = UWB_RSV_TARGET_DEV; |
532 | rsv->target.dev = &rc->uwb_dev; | 744 | rsv->target.dev = &rc->uwb_dev; |
745 | uwb_dev_get(&rc->uwb_dev); | ||
533 | rsv->type = uwb_ie_drp_type(drp_ie); | 746 | rsv->type = uwb_ie_drp_type(drp_ie); |
534 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); | 747 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); |
535 | set_bit(rsv->stream, rsv->owner->streams); | ||
536 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); | 748 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); |
537 | 749 | ||
538 | /* | 750 | /* |
@@ -540,24 +752,46 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
540 | * deny the request. | 752 | * deny the request. |
541 | */ | 753 | */ |
542 | rsv->state = UWB_RSV_STATE_T_DENIED; | 754 | rsv->state = UWB_RSV_STATE_T_DENIED; |
543 | spin_lock(&rc->pal_lock); | 755 | mutex_lock(&rc->uwb_dev.mutex); |
544 | list_for_each_entry(pal, &rc->pals, node) { | 756 | list_for_each_entry(pal, &rc->pals, node) { |
545 | if (pal->new_rsv) | 757 | if (pal->new_rsv) |
546 | pal->new_rsv(rsv); | 758 | pal->new_rsv(pal, rsv); |
547 | if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) | 759 | if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) |
548 | break; | 760 | break; |
549 | } | 761 | } |
550 | spin_unlock(&rc->pal_lock); | 762 | mutex_unlock(&rc->uwb_dev.mutex); |
551 | 763 | ||
552 | list_add_tail(&rsv->rc_node, &rc->reservations); | 764 | list_add_tail(&rsv->rc_node, &rc->reservations); |
553 | state = rsv->state; | 765 | state = rsv->state; |
554 | rsv->state = UWB_RSV_STATE_NONE; | 766 | rsv->state = UWB_RSV_STATE_NONE; |
555 | uwb_rsv_set_state(rsv, state); | 767 | |
768 | /* FIXME: do something sensible here */ | ||
769 | if (state == UWB_RSV_STATE_T_ACCEPTED | ||
770 | && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { | ||
771 | /* FIXME: do something sensible here */ | ||
772 | } else { | ||
773 | uwb_rsv_set_state(rsv, state); | ||
774 | } | ||
556 | 775 | ||
557 | return rsv; | 776 | return rsv; |
558 | } | 777 | } |
559 | 778 | ||
560 | /** | 779 | /** |
780 | * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation ||
781 | * @rsv: the reservation. | ||
782 | * @mas: returns the available MAS. | ||
783 | * | ||
784 | * The usable MAS of a reservation may be less than the negotiated MAS | ||
785 | * if alien BPs are present. | ||
786 | */ | ||
787 | void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) | ||
788 | { | ||
789 | bitmap_zero(mas->bm, UWB_NUM_MAS); | ||
790 | bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); | ||
791 | } | ||
792 | EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); | ||
793 | |||
794 | /** | ||
561 | * uwb_rsv_find - find a reservation for a received DRP IE. | 795 | * uwb_rsv_find - find a reservation for a received DRP IE. |
562 | * @rc: the radio controller | 796 | * @rc: the radio controller |
563 | * @src: source of the DRP IE | 797 | * @src: source of the DRP IE |
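uwb_rsv_get_usable_mas(), added in this hunk, masks out the MAS currently claimed by an alien BP. A hypothetical PAL-side helper showing how it might be consumed; only the exported function, struct uwb_mas_bm and UWB_NUM_MAS come from this file:

static int demo_usable_mas_count(struct uwb_rsv *rsv)
{
	struct uwb_mas_bm usable;

	uwb_rsv_get_usable_mas(rsv, &usable);
	/* transmit only in MAS not overlapping the alien BP conflict bitmap */
	return bitmap_weight(usable.bm, UWB_NUM_MAS);
}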
@@ -596,8 +830,6 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
596 | bool ie_updated = false; | 830 | bool ie_updated = false; |
597 | 831 | ||
598 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | 832 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { |
599 | if (rsv->expired) | ||
600 | uwb_drp_handle_timeout(rsv); | ||
601 | if (!rsv->ie_valid) { | 833 | if (!rsv->ie_valid) { |
602 | uwb_drp_ie_update(rsv); | 834 | uwb_drp_ie_update(rsv); |
603 | ie_updated = true; | 835 | ie_updated = true; |
@@ -607,9 +839,47 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
607 | return ie_updated; | 839 | return ie_updated; |
608 | } | 840 | } |
609 | 841 | ||
842 | void uwb_rsv_queue_update(struct uwb_rc *rc) | ||
843 | { | ||
844 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
845 | |||
846 | queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); | ||
847 | } | ||
848 | |||
849 | /** | ||
850 | * uwb_rsv_sched_update - schedule an update of the DRP IEs | ||
851 | * @rc: the radio controller. | ||
852 | * | ||
853 | * To improve performance and ensure correctness with [ECMA-368] the | ||
855 | * number of SET-DRP-IE commands that are sent is limited. ||
855 | * | ||
856 | * DRP IEs update come from two sources: DRP events from the hardware | ||
857 | * which all occur at the beginning of the superframe ('synchronous' ||
858 | * events) and reservation establishment/termination requests from | ||
859 | * PALs or timers ('asynchronous' events). | ||
860 | * | ||
861 | * A delayed work ensures that all the synchronous events result in | ||
862 | * one SET-DRP-IE command. | ||
863 | * | ||
864 | * Additional logic (the set_drp_ie_pending and rsv_updated_postponed | ||
865 | * flags) will prevent an asynchronous event starting a SET-DRP-IE ||
866 | * command if one is currently awaiting a response. | ||
867 | * | ||
868 | * FIXME: this does leave a window where an asynchronous event can delay ||
869 | * the SET-DRP-IE for a synchronous event by one superframe. | ||
870 | */ | ||
610 | void uwb_rsv_sched_update(struct uwb_rc *rc) | 871 | void uwb_rsv_sched_update(struct uwb_rc *rc) |
611 | { | 872 | { |
612 | queue_work(rc->rsv_workq, &rc->rsv_update_work); | 873 | spin_lock(&rc->rsvs_lock); |
874 | if (!delayed_work_pending(&rc->rsv_update_work)) { | ||
875 | if (rc->set_drp_ie_pending > 0) { | ||
876 | rc->set_drp_ie_pending++; | ||
877 | goto unlock; | ||
878 | } | ||
879 | uwb_rsv_queue_update(rc); | ||
880 | } | ||
881 | unlock: | ||
882 | spin_unlock(&rc->rsvs_lock); | ||
613 | } | 883 | } |
614 | 884 | ||
615 | /* | 885 | /* |
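The comment block above describes a coalescing scheme: one delayed work gathers all the superframe-synchronous updates, and while a SET-DRP-IE command is outstanding further requests only bump set_drp_ie_pending, so the command-completion path can requeue a single update later. The same pattern in isolation; the demo_rc structure and names are hypothetical:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_rc {                        /* hypothetical radio controller */
	spinlock_t lock;
	struct workqueue_struct *wq;
	struct delayed_work update_work;
	unsigned cmds_in_flight;        /* plays the role of set_drp_ie_pending */
	unsigned long delay_us;
};

static void demo_sched_update(struct demo_rc *rc)
{
	spin_lock(&rc->lock);
	if (!delayed_work_pending(&rc->update_work)) {
		if (rc->cmds_in_flight > 0)
			rc->cmds_in_flight++;   /* postponed: completion path requeues */
		else
			queue_delayed_work(rc->wq, &rc->update_work,
					   usecs_to_jiffies(rc->delay_us));
	}
	spin_unlock(&rc->lock);
}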
@@ -618,7 +888,8 @@ void uwb_rsv_sched_update(struct uwb_rc *rc) | |||
618 | */ | 888 | */ |
619 | static void uwb_rsv_update_work(struct work_struct *work) | 889 | static void uwb_rsv_update_work(struct work_struct *work) |
620 | { | 890 | { |
621 | struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); | 891 | struct uwb_rc *rc = container_of(work, struct uwb_rc, |
892 | rsv_update_work.work); | ||
622 | bool ie_updated; | 893 | bool ie_updated; |
623 | 894 | ||
624 | mutex_lock(&rc->rsvs_mutex); | 895 | mutex_lock(&rc->rsvs_mutex); |
@@ -630,25 +901,71 @@ static void uwb_rsv_update_work(struct work_struct *work) | |||
630 | ie_updated = true; | 901 | ie_updated = true; |
631 | } | 902 | } |
632 | 903 | ||
633 | if (ie_updated) | 904 | if (ie_updated && (rc->set_drp_ie_pending == 0)) |
634 | uwb_rc_send_all_drp_ie(rc); | 905 | uwb_rc_send_all_drp_ie(rc); |
635 | 906 | ||
636 | mutex_unlock(&rc->rsvs_mutex); | 907 | mutex_unlock(&rc->rsvs_mutex); |
637 | } | 908 | } |
638 | 909 | ||
910 | static void uwb_rsv_alien_bp_work(struct work_struct *work) | ||
911 | { | ||
912 | struct uwb_rc *rc = container_of(work, struct uwb_rc, | ||
913 | rsv_alien_bp_work.work); | ||
914 | struct uwb_rsv *rsv; | ||
915 | |||
916 | mutex_lock(&rc->rsvs_mutex); | ||
917 | |||
918 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
919 | if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { | ||
920 | rsv->callback(rsv); | ||
921 | } | ||
922 | } | ||
923 | |||
924 | mutex_unlock(&rc->rsvs_mutex); | ||
925 | } | ||
926 | |||
639 | static void uwb_rsv_timer(unsigned long arg) | 927 | static void uwb_rsv_timer(unsigned long arg) |
640 | { | 928 | { |
641 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; | 929 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; |
642 | 930 | ||
643 | rsv->expired = true; | 931 | queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); |
644 | uwb_rsv_sched_update(rsv->rc); | 932 | } |
933 | |||
934 | /** | ||
935 | * uwb_rsv_remove_all - remove all reservations | ||
936 | * @rc: the radio controller | ||
937 | * | ||
938 | * A DRP IE update is not done. | ||
939 | */ | ||
940 | void uwb_rsv_remove_all(struct uwb_rc *rc) | ||
941 | { | ||
942 | struct uwb_rsv *rsv, *t; | ||
943 | |||
944 | mutex_lock(&rc->rsvs_mutex); | ||
945 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
946 | uwb_rsv_remove(rsv); | ||
947 | } | ||
948 | /* Cancel any postponed update. */ | ||
949 | rc->set_drp_ie_pending = 0; | ||
950 | mutex_unlock(&rc->rsvs_mutex); | ||
951 | |||
952 | cancel_delayed_work_sync(&rc->rsv_update_work); | ||
645 | } | 953 | } |
646 | 954 | ||
647 | void uwb_rsv_init(struct uwb_rc *rc) | 955 | void uwb_rsv_init(struct uwb_rc *rc) |
648 | { | 956 | { |
649 | INIT_LIST_HEAD(&rc->reservations); | 957 | INIT_LIST_HEAD(&rc->reservations); |
958 | INIT_LIST_HEAD(&rc->cnflt_alien_list); | ||
650 | mutex_init(&rc->rsvs_mutex); | 959 | mutex_init(&rc->rsvs_mutex); |
651 | INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | 960 | spin_lock_init(&rc->rsvs_lock); |
961 | INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | ||
962 | INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); | ||
963 | rc->bow.can_reserve_extra_mases = true; | ||
964 | rc->bow.total_expired = 0; | ||
965 | rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; | ||
966 | init_timer(&rc->bow.timer); | ||
967 | rc->bow.timer.function = uwb_rsv_backoff_win_timer; | ||
968 | rc->bow.timer.data = (unsigned long)&rc->bow; | ||
652 | 969 | ||
653 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); | 970 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); |
654 | } | 971 | } |
@@ -667,14 +984,6 @@ int uwb_rsv_setup(struct uwb_rc *rc) | |||
667 | 984 | ||
668 | void uwb_rsv_cleanup(struct uwb_rc *rc) | 985 | void uwb_rsv_cleanup(struct uwb_rc *rc) |
669 | { | 986 | { |
670 | struct uwb_rsv *rsv, *t; | 987 | uwb_rsv_remove_all(rc); |
671 | |||
672 | mutex_lock(&rc->rsvs_mutex); | ||
673 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
674 | uwb_rsv_remove(rsv); | ||
675 | } | ||
676 | mutex_unlock(&rc->rsvs_mutex); | ||
677 | |||
678 | cancel_work_sync(&rc->rsv_update_work); | ||
679 | destroy_workqueue(rc->rsv_workq); | 988 | destroy_workqueue(rc->rsv_workq); |
680 | } | 989 | } |