Diffstat (limited to 'drivers/uwb/rsv.c')
-rw-r--r--  drivers/uwb/rsv.c | 482
1 file changed, 382 insertions(+), 100 deletions(-)
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index 1cd84f927540..165aec6a8f97 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -17,20 +17,31 @@ | |||
17 | */ | 17 | */ |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/uwb.h> | 19 | #include <linux/uwb.h> |
20 | #include <linux/random.h> | ||
20 | 21 | ||
21 | #include "uwb-internal.h" | 22 | #include "uwb-internal.h" |
22 | 23 | ||
23 | static void uwb_rsv_timer(unsigned long arg); | 24 | static void uwb_rsv_timer(unsigned long arg); |
24 | 25 | ||
25 | static const char *rsv_states[] = { | 26 | static const char *rsv_states[] = { |
26 | [UWB_RSV_STATE_NONE] = "none", | 27 | [UWB_RSV_STATE_NONE] = "none ", |
27 | [UWB_RSV_STATE_O_INITIATED] = "initiated", | 28 | [UWB_RSV_STATE_O_INITIATED] = "o initiated ", |
28 | [UWB_RSV_STATE_O_PENDING] = "pending", | 29 | [UWB_RSV_STATE_O_PENDING] = "o pending ", |
29 | [UWB_RSV_STATE_O_MODIFIED] = "modified", | 30 | [UWB_RSV_STATE_O_MODIFIED] = "o modified ", |
30 | [UWB_RSV_STATE_O_ESTABLISHED] = "established", | 31 | [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", |
31 | [UWB_RSV_STATE_T_ACCEPTED] = "accepted", | 32 | [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", |
32 | [UWB_RSV_STATE_T_DENIED] = "denied", | 33 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", |
33 | [UWB_RSV_STATE_T_PENDING] = "pending", | 34 | [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", |
35 | [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", | ||
36 | [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", | ||
37 | [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", | ||
38 | [UWB_RSV_STATE_T_PENDING] = "t pending ", | ||
39 | [UWB_RSV_STATE_T_DENIED] = "t denied ", | ||
40 | [UWB_RSV_STATE_T_RESIZED] = "t resized ", | ||
41 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", | ||
42 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", | ||
43 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", | ||
44 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", | ||
34 | }; | 45 | }; |
35 | 46 | ||
36 | static const char *rsv_types[] = { | 47 | static const char *rsv_types[] = { |
@@ -41,6 +52,31 @@ static const char *rsv_types[] = { | |||
41 | [UWB_DRP_TYPE_PCA] = "pca", | 52 | [UWB_DRP_TYPE_PCA] = "pca", |
42 | }; | 53 | }; |
43 | 54 | ||
55 | bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) | ||
56 | { | ||
57 | static const bool has_two_drp_ies[] = { | ||
58 | [UWB_RSV_STATE_O_INITIATED] = false, | ||
59 | [UWB_RSV_STATE_O_PENDING] = false, | ||
60 | [UWB_RSV_STATE_O_MODIFIED] = false, | ||
61 | [UWB_RSV_STATE_O_ESTABLISHED] = false, | ||
62 | [UWB_RSV_STATE_O_TO_BE_MOVED] = false, | ||
63 | [UWB_RSV_STATE_O_MOVE_COMBINING] = false, | ||
64 | [UWB_RSV_STATE_O_MOVE_REDUCING] = false, | ||
65 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, | ||
66 | [UWB_RSV_STATE_T_ACCEPTED] = false, | ||
67 | [UWB_RSV_STATE_T_CONFLICT] = false, | ||
68 | [UWB_RSV_STATE_T_PENDING] = false, | ||
69 | [UWB_RSV_STATE_T_DENIED] = false, | ||
70 | [UWB_RSV_STATE_T_RESIZED] = false, | ||
71 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, | ||
72 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, | ||
73 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, | ||
74 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, | ||
75 | }; | ||
76 | |||
77 | return has_two_drp_ies[rsv->state]; | ||
78 | } | ||
79 | |||
44 | /** | 80 | /** |
45 | * uwb_rsv_state_str - return a string for a reservation state | 81 | * uwb_rsv_state_str - return a string for a reservation state |
46 | * @state: the reservation state. | 82 | * @state: the reservation state. |
@@ -65,7 +101,7 @@ const char *uwb_rsv_type_str(enum uwb_drp_type type) | |||
65 | } | 101 | } |
66 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); | 102 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); |
67 | 103 | ||
68 | static void uwb_rsv_dump(struct uwb_rsv *rsv) | 104 | void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) |
69 | { | 105 | { |
70 | struct device *dev = &rsv->rc->uwb_dev.dev; | 106 | struct device *dev = &rsv->rc->uwb_dev.dev; |
71 | struct uwb_dev_addr devaddr; | 107 | struct uwb_dev_addr devaddr; |
@@ -88,12 +124,12 @@ static void uwb_rsv_release(struct kref *kref) | |||
88 | kfree(rsv); | 124 | kfree(rsv); |
89 | } | 125 | } |
90 | 126 | ||
91 | static void uwb_rsv_get(struct uwb_rsv *rsv) | 127 | void uwb_rsv_get(struct uwb_rsv *rsv) |
92 | { | 128 | { |
93 | kref_get(&rsv->kref); | 129 | kref_get(&rsv->kref); |
94 | } | 130 | } |
95 | 131 | ||
96 | static void uwb_rsv_put(struct uwb_rsv *rsv) | 132 | void uwb_rsv_put(struct uwb_rsv *rsv) |
97 | { | 133 | { |
98 | kref_put(&rsv->kref, uwb_rsv_release); | 134 | kref_put(&rsv->kref, uwb_rsv_release); |
99 | } | 135 | } |
@@ -108,6 +144,7 @@ static void uwb_rsv_put(struct uwb_rsv *rsv) | |||
108 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | 144 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) |
109 | { | 145 | { |
110 | struct uwb_rc *rc = rsv->rc; | 146 | struct uwb_rc *rc = rsv->rc; |
147 | struct device *dev = &rc->uwb_dev.dev; | ||
111 | unsigned long *streams_bm; | 148 | unsigned long *streams_bm; |
112 | int stream; | 149 | int stream; |
113 | 150 | ||
@@ -129,12 +166,15 @@ static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | |||
129 | rsv->stream = stream; | 166 | rsv->stream = stream; |
130 | set_bit(stream, streams_bm); | 167 | set_bit(stream, streams_bm); |
131 | 168 | ||
169 | dev_dbg(dev, "get stream %d\n", rsv->stream); | ||
170 | |||
132 | return 0; | 171 | return 0; |
133 | } | 172 | } |
134 | 173 | ||
135 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | 174 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) |
136 | { | 175 | { |
137 | struct uwb_rc *rc = rsv->rc; | 176 | struct uwb_rc *rc = rsv->rc; |
177 | struct device *dev = &rc->uwb_dev.dev; | ||
138 | unsigned long *streams_bm; | 178 | unsigned long *streams_bm; |
139 | 179 | ||
140 | switch (rsv->target.type) { | 180 | switch (rsv->target.type) { |
@@ -149,86 +189,52 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | |||
149 | } | 189 | } |
150 | 190 | ||
151 | clear_bit(rsv->stream, streams_bm); | 191 | clear_bit(rsv->stream, streams_bm); |
192 | |||
193 | dev_dbg(dev, "put stream %d\n", rsv->stream); | ||
152 | } | 194 | } |
153 | 195 | ||
154 | /* | 196 | void uwb_rsv_backoff_win_timer(unsigned long arg) |
155 | * Generate a MAS allocation with a single row component. | ||
156 | */ | ||
157 | static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, | ||
158 | int first_mas, int mas_per_zone, | ||
159 | int zs, int ze) | ||
160 | { | 197 | { |
161 | struct uwb_mas_bm col; | 198 | struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; |
162 | int z; | 199 | struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); |
200 | struct device *dev = &rc->uwb_dev.dev; | ||
163 | 201 | ||
164 | bitmap_zero(mas->bm, UWB_NUM_MAS); | 202 | bow->can_reserve_extra_mases = true; |
165 | bitmap_zero(col.bm, UWB_NUM_MAS); | 203 | if (bow->total_expired <= 4) { |
166 | bitmap_fill(col.bm, mas_per_zone); | 204 | bow->total_expired++; |
167 | bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 205 | } else { |
168 | 206 | /* after 4 backoff windows have expired we can exit from |
169 | for (z = zs; z <= ze; z++) { | 207 | * the backoff procedure */ |
170 | bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); | 208 | bow->total_expired = 0; |
171 | bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 209 | bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; |
172 | } | 210 | } |
211 | dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); | ||
212 | |||
213 | /* try to relocate all the "to be moved" reservations */ | ||
214 | uwb_rsv_handle_drp_avail_change(rc); | ||
173 | } | 215 | } |
174 | 216 | ||
175 | /* | 217 | void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) |
176 | * Allocate some MAS for this reservation based on current local | ||
177 | * availability, the reservation parameters (max_mas, min_mas, | ||
178 | * sparsity), and the WiMedia rules for MAS allocations. | ||
179 | * | ||
180 | * Returns -EBUSY is insufficient free MAS are available. | ||
181 | * | ||
182 | * FIXME: to simplify this, only safe reservations with a single row | ||
183 | * component in zones 1 to 15 are tried (zone 0 is skipped to avoid | ||
184 | * problems with the MAS reserved for the BP). | ||
185 | * | ||
186 | * [ECMA-368] section B.2. | ||
187 | */ | ||
188 | static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) | ||
189 | { | 218 | { |
190 | static const int safe_mas_in_row[UWB_NUM_ZONES] = { | 219 | struct uwb_drp_backoff_win *bow = &rc->bow; |
191 | 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, | 220 | struct device *dev = &rc->uwb_dev.dev; |
192 | }; | 221 | unsigned timeout_us; |
193 | int n, r; | ||
194 | struct uwb_mas_bm mas; | ||
195 | bool found = false; | ||
196 | 222 | ||
197 | /* | 223 | dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); |
198 | * Search all valid safe allocations until either: too few MAS | ||
199 | * are available; or the smallest allocation with sufficient | ||
200 | * MAS is found. | ||
201 | * | ||
202 | * The top of the zones are preferred, so space for larger | ||
203 | * allocations is available in the bottom of the zone (e.g., a | ||
204 | * 15 MAS allocation should start in row 14 leaving space for | ||
205 | * a 120 MAS allocation at row 0). | ||
206 | */ | ||
207 | for (n = safe_mas_in_row[0]; n >= 1; n--) { | ||
208 | int num_mas; | ||
209 | 224 | ||
210 | num_mas = n * (UWB_NUM_ZONES - 1); | 225 | bow->can_reserve_extra_mases = false; |
211 | if (num_mas < rsv->min_mas) | ||
212 | break; | ||
213 | if (found && num_mas < rsv->max_mas) | ||
214 | break; | ||
215 | 226 | ||
216 | for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { | 227 | if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) |
217 | if (safe_mas_in_row[r] < n) | 228 | return; |
218 | continue; | ||
219 | uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); | ||
220 | if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { | ||
221 | found = true; | ||
222 | break; | ||
223 | } | ||
224 | } | ||
225 | } | ||
226 | 229 | ||
227 | if (!found) | 230 | bow->window <<= 1; |
228 | return -EBUSY; | 231 | bow->n = random32() & (bow->window - 1); |
232 | dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); | ||
229 | 233 | ||
230 | bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); | 234 | /* reset the timer associated variables */ |
231 | return 0; | 235 | timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; |
236 | bow->total_expired = 0; | ||
237 | mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); | ||
232 | } | 238 | } |
233 | 239 | ||
234 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | 240 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) |
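For reference, a minimal userspace sketch of the binary exponential backoff implemented by uwb_rsv_backoff_win_increment() in the hunk above. The constants are assumptions taken from ECMA-368 (mDRPBackoffWinMin = 2 and mDRPBackoffWinMax = 16 superframes, a 65536 us superframe), and rand() stands in for random32(); this is an illustration, not driver code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define DRP_BACKOFF_WIN_MIN   2        /* superframes (assumed, ECMA-368) */
    #define DRP_BACKOFF_WIN_MAX   16       /* superframes (assumed, ECMA-368) */
    #define SUPERFRAME_LENGTH_US  65536    /* 256 MAS x 256 us */

    struct backoff_win {
            int window;                    /* current window, in superframes */
            int n;                         /* random slot picked in [0, window) */
    };

    /* mirror the driver: refuse to double once the cap would be reached,
     * otherwise double the window and draw a random slot inside it */
    static int backoff_win_increment(struct backoff_win *bow)
    {
            if ((bow->window << 1) == DRP_BACKOFF_WIN_MAX)
                    return -1;
            bow->window <<= 1;
            bow->n = rand() & (bow->window - 1);   /* window is a power of two */
            return bow->n * SUPERFRAME_LENGTH_US;  /* wait before retrying */
    }

    int main(void)
    {
            /* uwb_rsv_init() starts at UWB_DRP_BACKOFF_WIN_MIN >> 1 */
            struct backoff_win bow = { .window = DRP_BACKOFF_WIN_MIN >> 1 };
            int timeout_us;

            srand(time(NULL));
            while ((timeout_us = backoff_win_increment(&bow)) >= 0)
                    printf("window=%2d slot=%2d wait=%6d us\n",
                           bow.window, bow.n, timeout_us);
            return 0;
    }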
@@ -241,13 +247,16 @@ static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | |||
241 | * received. | 247 | * received. |
242 | */ | 248 | */ |
243 | if (rsv->is_multicast) { | 249 | if (rsv->is_multicast) { |
244 | if (rsv->state == UWB_RSV_STATE_O_INITIATED) | 250 | if (rsv->state == UWB_RSV_STATE_O_INITIATED |
251 | || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING | ||
252 | || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING | ||
253 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) | ||
245 | sframes = 1; | 254 | sframes = 1; |
246 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) | 255 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) |
247 | sframes = 0; | 256 | sframes = 0; |
257 | |||
248 | } | 258 | } |
249 | 259 | ||
250 | rsv->expired = false; | ||
251 | if (sframes > 0) { | 260 | if (sframes > 0) { |
252 | /* | 261 | /* |
253 | * Add an additional 2 superframes to account for the | 262 | * Add an additional 2 superframes to account for the |
@@ -269,7 +278,7 @@ static void uwb_rsv_state_update(struct uwb_rsv *rsv, | |||
269 | rsv->state = new_state; | 278 | rsv->state = new_state; |
270 | rsv->ie_valid = false; | 279 | rsv->ie_valid = false; |
271 | 280 | ||
272 | uwb_rsv_dump(rsv); | 281 | uwb_rsv_dump("SU", rsv); |
273 | 282 | ||
274 | uwb_rsv_stroke_timer(rsv); | 283 | uwb_rsv_stroke_timer(rsv); |
275 | uwb_rsv_sched_update(rsv->rc); | 284 | uwb_rsv_sched_update(rsv->rc); |
@@ -283,10 +292,17 @@ static void uwb_rsv_callback(struct uwb_rsv *rsv) | |||
283 | 292 | ||
284 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | 293 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) |
285 | { | 294 | { |
295 | struct uwb_rsv_move *mv = &rsv->mv; | ||
296 | |||
286 | if (rsv->state == new_state) { | 297 | if (rsv->state == new_state) { |
287 | switch (rsv->state) { | 298 | switch (rsv->state) { |
288 | case UWB_RSV_STATE_O_ESTABLISHED: | 299 | case UWB_RSV_STATE_O_ESTABLISHED: |
300 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
301 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
302 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
289 | case UWB_RSV_STATE_T_ACCEPTED: | 303 | case UWB_RSV_STATE_T_ACCEPTED: |
304 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
305 | case UWB_RSV_STATE_T_RESIZED: | ||
290 | case UWB_RSV_STATE_NONE: | 306 | case UWB_RSV_STATE_NONE: |
291 | uwb_rsv_stroke_timer(rsv); | 307 | uwb_rsv_stroke_timer(rsv); |
292 | break; | 308 | break; |
@@ -298,11 +314,10 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
298 | return; | 314 | return; |
299 | } | 315 | } |
300 | 316 | ||
317 | uwb_rsv_dump("SC", rsv); | ||
318 | |||
301 | switch (new_state) { | 319 | switch (new_state) { |
302 | case UWB_RSV_STATE_NONE: | 320 | case UWB_RSV_STATE_NONE: |
303 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
304 | if (uwb_rsv_is_owner(rsv)) | ||
305 | uwb_rsv_put_stream(rsv); | ||
306 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); | 321 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); |
307 | uwb_rsv_callback(rsv); | 322 | uwb_rsv_callback(rsv); |
308 | break; | 323 | break; |
@@ -312,12 +327,45 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
312 | case UWB_RSV_STATE_O_PENDING: | 327 | case UWB_RSV_STATE_O_PENDING: |
313 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); | 328 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); |
314 | break; | 329 | break; |
330 | case UWB_RSV_STATE_O_MODIFIED: | ||
331 | /* the companion bitmap holds the MASes to drop */ | ||
332 | bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
333 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
334 | break; | ||
315 | case UWB_RSV_STATE_O_ESTABLISHED: | 335 | case UWB_RSV_STATE_O_ESTABLISHED: |
336 | if (rsv->state == UWB_RSV_STATE_O_MODIFIED | ||
337 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { | ||
338 | uwb_drp_avail_release(rsv->rc, &mv->companion_mas); | ||
339 | rsv->needs_release_companion_mas = false; | ||
340 | } | ||
316 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 341 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
317 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 342 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); |
318 | uwb_rsv_callback(rsv); | 343 | uwb_rsv_callback(rsv); |
319 | break; | 344 | break; |
345 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
346 | rsv->needs_release_companion_mas = true; | ||
347 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
348 | break; | ||
349 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
350 | rsv->needs_release_companion_mas = false; | ||
351 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
352 | bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
353 | rsv->mas.safe += mv->companion_mas.safe; | ||
354 | rsv->mas.unsafe += mv->companion_mas.unsafe; | ||
355 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
356 | break; | ||
357 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
358 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
359 | rsv->needs_release_companion_mas = true; | ||
360 | rsv->mas.safe = mv->final_mas.safe; | ||
361 | rsv->mas.unsafe = mv->final_mas.unsafe; | ||
362 | bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
363 | bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); | ||
364 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
365 | break; | ||
320 | case UWB_RSV_STATE_T_ACCEPTED: | 366 | case UWB_RSV_STATE_T_ACCEPTED: |
367 | case UWB_RSV_STATE_T_RESIZED: | ||
368 | rsv->needs_release_companion_mas = false; | ||
321 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 369 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
322 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); | 370 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); |
323 | uwb_rsv_callback(rsv); | 371 | uwb_rsv_callback(rsv); |
@@ -325,12 +373,82 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
325 | case UWB_RSV_STATE_T_DENIED: | 373 | case UWB_RSV_STATE_T_DENIED: |
326 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); | 374 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); |
327 | break; | 375 | break; |
376 | case UWB_RSV_STATE_T_CONFLICT: | ||
377 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
378 | break; | ||
379 | case UWB_RSV_STATE_T_PENDING: | ||
380 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); | ||
381 | break; | ||
382 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
383 | rsv->needs_release_companion_mas = true; | ||
384 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
385 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); | ||
386 | break; | ||
328 | default: | 387 | default: |
329 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", | 388 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", |
330 | uwb_rsv_state_str(new_state), new_state); | 389 | uwb_rsv_state_str(new_state), new_state); |
331 | } | 390 | } |
332 | } | 391 | } |
333 | 392 | ||
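The O_MOVE_COMBINING and O_MOVE_REDUCING cases above are pure bitmap arithmetic on the reservation's MAS and its companion. A toy illustration, with a 32-bit word standing in for the 256-bit UWB_NUM_MAS bitmap (not the kernel bitmap API):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t mas       = 0x000000f0;   /* MAS currently reserved */
            uint32_t companion = 0x0000f000;   /* extra MAS won for the move */
            uint32_t final_mas = 0x0000f0c0;   /* target layout after the move */

            /* O_MOVE_COMBINING: fold the companion into the reservation */
            mas |= companion;
            printf("after combining: %#010x\n", mas);

            /* O_MOVE_REDUCING: the companion becomes the MAS to drop,
             * and the reservation shrinks to the final allocation */
            companion = mas & ~final_mas;
            mas = final_mas;
            printf("to drop: %#010x  final: %#010x\n", companion, mas);
            return 0;
    }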
393 | static void uwb_rsv_handle_timeout_work(struct work_struct *work) | ||
394 | { | ||
395 | struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, | ||
396 | handle_timeout_work); | ||
397 | struct uwb_rc *rc = rsv->rc; | ||
398 | |||
399 | mutex_lock(&rc->rsvs_mutex); | ||
400 | |||
401 | uwb_rsv_dump("TO", rsv); | ||
402 | |||
403 | switch (rsv->state) { | ||
404 | case UWB_RSV_STATE_O_INITIATED: | ||
405 | if (rsv->is_multicast) { | ||
406 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
407 | goto unlock; | ||
408 | } | ||
409 | break; | ||
410 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
411 | if (rsv->is_multicast) { | ||
412 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
413 | goto unlock; | ||
414 | } | ||
415 | break; | ||
416 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
417 | if (rsv->is_multicast) { | ||
418 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
419 | goto unlock; | ||
420 | } | ||
421 | break; | ||
422 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
423 | if (rsv->is_multicast) { | ||
424 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
425 | goto unlock; | ||
426 | } | ||
427 | break; | ||
428 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
429 | if (rsv->is_multicast) | ||
430 | goto unlock; | ||
431 | break; | ||
432 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
433 | /* | ||
434 | * The timeout could be for the main or for the | ||
435 | * companion DRP; assume it's for the companion and | ||
436 | * drop that first. A further timeout is required to | ||
437 | * drop the main. | ||
438 | */ | ||
439 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
440 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
441 | goto unlock; | ||
442 | default: | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | uwb_rsv_remove(rsv); | ||
447 | |||
448 | unlock: | ||
449 | mutex_unlock(&rc->rsvs_mutex); | ||
450 | } | ||
451 | |||
334 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | 452 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) |
335 | { | 453 | { |
336 | struct uwb_rsv *rsv; | 454 | struct uwb_rsv *rsv; |
@@ -347,6 +465,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | |||
347 | rsv->timer.data = (unsigned long)rsv; | 465 | rsv->timer.data = (unsigned long)rsv; |
348 | 466 | ||
349 | rsv->rc = rc; | 467 | rsv->rc = rc; |
468 | INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); | ||
350 | 469 | ||
351 | return rsv; | 470 | return rsv; |
352 | } | 471 | } |
@@ -381,8 +500,18 @@ EXPORT_SYMBOL_GPL(uwb_rsv_create); | |||
381 | 500 | ||
382 | void uwb_rsv_remove(struct uwb_rsv *rsv) | 501 | void uwb_rsv_remove(struct uwb_rsv *rsv) |
383 | { | 502 | { |
503 | uwb_rsv_dump("RM", rsv); | ||
504 | |||
384 | if (rsv->state != UWB_RSV_STATE_NONE) | 505 | if (rsv->state != UWB_RSV_STATE_NONE) |
385 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 506 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
507 | |||
508 | if (rsv->needs_release_companion_mas) | ||
509 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
510 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
511 | |||
512 | if (uwb_rsv_is_owner(rsv)) | ||
513 | uwb_rsv_put_stream(rsv); | ||
514 | |||
386 | del_timer_sync(&rsv->timer); | 515 | del_timer_sync(&rsv->timer); |
387 | uwb_dev_put(rsv->owner); | 516 | uwb_dev_put(rsv->owner); |
388 | if (rsv->target.type == UWB_RSV_TARGET_DEV) | 517 | if (rsv->target.type == UWB_RSV_TARGET_DEV) |
@@ -409,7 +538,7 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
409 | * @rsv: the reservation | 538 | * @rsv: the reservation |
410 | * | 539 | * |
411 | * The PAL should fill in @rsv's owner, target, type, max_mas, | 540 | * The PAL should fill in @rsv's owner, target, type, max_mas, |
412 | * min_mas, sparsity and is_multicast fields. If the target is a | 541 | * min_mas, max_interval and is_multicast fields. If the target is a |
413 | * uwb_dev it must be referenced. | 542 | * uwb_dev it must be referenced. |
414 | * | 543 | * |
415 | * The reservation's callback will be called when the reservation is | 544 | * The reservation's callback will be called when the reservation is |
@@ -418,16 +547,27 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
418 | int uwb_rsv_establish(struct uwb_rsv *rsv) | 547 | int uwb_rsv_establish(struct uwb_rsv *rsv) |
419 | { | 548 | { |
420 | struct uwb_rc *rc = rsv->rc; | 549 | struct uwb_rc *rc = rsv->rc; |
550 | struct uwb_mas_bm available; | ||
421 | int ret; | 551 | int ret; |
422 | 552 | ||
423 | mutex_lock(&rc->rsvs_mutex); | 553 | mutex_lock(&rc->rsvs_mutex); |
424 | |||
425 | ret = uwb_rsv_get_stream(rsv); | 554 | ret = uwb_rsv_get_stream(rsv); |
426 | if (ret) | 555 | if (ret) |
427 | goto out; | 556 | goto out; |
428 | 557 | ||
429 | ret = uwb_rsv_alloc_mas(rsv); | 558 | rsv->tiebreaker = random32() & 1; |
430 | if (ret) { | 559 | /* get available mas bitmap */ |
560 | uwb_drp_available(rc, &available); | ||
561 | |||
562 | ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); | ||
563 | if (ret == UWB_RSV_ALLOC_NOT_FOUND) { | ||
564 | ret = -EBUSY; | ||
565 | uwb_rsv_put_stream(rsv); | ||
566 | goto out; | ||
567 | } | ||
568 | |||
569 | ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); | ||
570 | if (ret != 0) { | ||
431 | uwb_rsv_put_stream(rsv); | 571 | uwb_rsv_put_stream(rsv); |
432 | goto out; | 572 | goto out; |
433 | } | 573 | } |
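The kernel-doc above describes the PAL-facing contract for uwb_rsv_establish(). A hedged sketch of a caller follows; the my_pal_* names and the particular parameter values are illustrative, not part of this patch.

    #include <linux/uwb.h>

    static void my_pal_rsv_cb(struct uwb_rsv *rsv)
    {
            /* called when the reservation is established, denied, modified
             * or terminated; inspect rsv->state here, e.g.
             * UWB_RSV_STATE_O_ESTABLISHED */
    }

    static int my_pal_reserve(struct uwb_rc *rc, struct uwb_dev *peer)
    {
            struct uwb_rsv *rsv;

            rsv = uwb_rsv_create(rc, my_pal_rsv_cb, NULL);
            if (!rsv)
                    return -ENOMEM;

            rsv->target.type  = UWB_RSV_TARGET_DEV;
            rsv->target.dev   = peer;       /* must be referenced, per the doc */
            rsv->type         = UWB_DRP_TYPE_HARD;
            rsv->max_mas      = 256;        /* as much as possible ... */
            rsv->min_mas      = 15;         /* ... but at least this much */
            rsv->max_interval = 1;          /* see max_interval in the doc above */
            rsv->is_multicast = false;

            return uwb_rsv_establish(rsv);  /* outcome reported via the callback */
    }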
@@ -448,16 +588,71 @@ EXPORT_SYMBOL_GPL(uwb_rsv_establish); | |||
448 | * @rsv: the reservation to modify | 588 | * @rsv: the reservation to modify |
449 | * @max_mas: new maximum MAS to reserve | 589 | * @max_mas: new maximum MAS to reserve |
450 | * @min_mas: new minimum MAS to reserve | 590 | * @min_mas: new minimum MAS to reserve |
451 | * @sparsity: new sparsity to use | 591 | * @max_interval: new max_interval to use |
452 | * | 592 | * |
453 | * FIXME: implement this once there are PALs that use it. | 593 | * FIXME: implement this once there are PALs that use it. |
454 | */ | 594 | */ |
455 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) | 595 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) |
456 | { | 596 | { |
457 | return -ENOSYS; | 597 | return -ENOSYS; |
458 | } | 598 | } |
459 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); | 599 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); |
460 | 600 | ||
601 | /* | ||
602 | * move an already established reservation (rc->rsvs_mutex must be | ||
603 | * taken when this function is called) | ||
604 | */ | ||
605 | int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) | ||
606 | { | ||
607 | struct uwb_rc *rc = rsv->rc; | ||
608 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
609 | struct device *dev = &rc->uwb_dev.dev; | ||
610 | struct uwb_rsv_move *mv; | ||
611 | int ret = 0; | ||
612 | |||
613 | if (bow->can_reserve_extra_mases == false) | ||
614 | return -EBUSY; | ||
615 | |||
616 | mv = &rsv->mv; | ||
617 | |||
618 | if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { | ||
619 | |||
620 | if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { | ||
621 | /* We want to move the reservation */ | ||
622 | bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
623 | uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); | ||
624 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
625 | } | ||
626 | } else { | ||
627 | dev_dbg(dev, "new allocation not found\n"); | ||
628 | } | ||
629 | |||
630 | return ret; | ||
631 | } | ||
632 | |||
633 | /* Try to move every reservation in state O_ESTABLISHED or O_TO_BE_MOVED, | ||
634 | * giving the MAS allocator an availability that is the real availability | ||
635 | * plus the MAS already allocated to the reservation. */ | ||
636 | void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) | ||
637 | { | ||
638 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
639 | struct uwb_rsv *rsv; | ||
640 | struct uwb_mas_bm mas; | ||
641 | |||
642 | if (bow->can_reserve_extra_mases == false) | ||
643 | return; | ||
644 | |||
645 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
646 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || | ||
647 | rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { | ||
648 | uwb_drp_available(rc, &mas); | ||
649 | bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
650 | uwb_rsv_try_move(rsv, &mas); | ||
651 | } | ||
652 | } | ||
653 | |||
654 | } | ||
655 | |||
461 | /** | 656 | /** |
462 | * uwb_rsv_terminate - terminate an established reservation | 657 | * uwb_rsv_terminate - terminate an established reservation |
463 | * @rsv: the reservation to terminate | 658 | * @rsv: the reservation to terminate |
@@ -546,6 +741,7 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
546 | uwb_dev_get(rsv->owner); | 741 | uwb_dev_get(rsv->owner); |
547 | rsv->target.type = UWB_RSV_TARGET_DEV; | 742 | rsv->target.type = UWB_RSV_TARGET_DEV; |
548 | rsv->target.dev = &rc->uwb_dev; | 743 | rsv->target.dev = &rc->uwb_dev; |
744 | uwb_dev_get(&rc->uwb_dev); | ||
549 | rsv->type = uwb_ie_drp_type(drp_ie); | 745 | rsv->type = uwb_ie_drp_type(drp_ie); |
550 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); | 746 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); |
551 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); | 747 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); |
@@ -567,12 +763,34 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
567 | list_add_tail(&rsv->rc_node, &rc->reservations); | 763 | list_add_tail(&rsv->rc_node, &rc->reservations); |
568 | state = rsv->state; | 764 | state = rsv->state; |
569 | rsv->state = UWB_RSV_STATE_NONE; | 765 | rsv->state = UWB_RSV_STATE_NONE; |
570 | uwb_rsv_set_state(rsv, state); | 766 | |
767 | /* FIXME: do something sensible here */ | ||
768 | if (state == UWB_RSV_STATE_T_ACCEPTED | ||
769 | && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { | ||
770 | /* FIXME: do something sensible here */ | ||
771 | } else { | ||
772 | uwb_rsv_set_state(rsv, state); | ||
773 | } | ||
571 | 774 | ||
572 | return rsv; | 775 | return rsv; |
573 | } | 776 | } |
574 | 777 | ||
575 | /** | 778 | /** |
779 | * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation | ||
780 | * @rsv: the reservation. | ||
781 | * @mas: returns the usable MAS. | ||
782 | * | ||
783 | * The usable MAS of a reservation may be less than the negotiated MAS | ||
784 | * if alien BPs are present. | ||
785 | */ | ||
786 | void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) | ||
787 | { | ||
788 | bitmap_zero(mas->bm, UWB_NUM_MAS); | ||
789 | bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); | ||
790 | } | ||
791 | EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); | ||
792 | |||
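Since uwb_rsv_get_usable_mas() above is exported for PALs, here is a short hedged usage sketch; the my_pal_tx_window() name is hypothetical and only the API shown above is assumed.

    #include <linux/uwb.h>
    #include <linux/bitops.h>

    static void my_pal_tx_window(struct uwb_rsv *rsv)
    {
            struct uwb_mas_bm mas;
            unsigned int i;

            uwb_rsv_get_usable_mas(rsv, &mas);
            for_each_set_bit(i, mas.bm, UWB_NUM_MAS) {
                    /* MAS i is negotiated and not currently lost to an
                     * alien BP, so it may be used this superframe */
            }
    }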
793 | /** | ||
576 | * uwb_rsv_find - find a reservation for a received DRP IE. | 794 | * uwb_rsv_find - find a reservation for a received DRP IE. |
577 | * @rc: the radio controller | 795 | * @rc: the radio controller |
578 | * @src: source of the DRP IE | 796 | * @src: source of the DRP IE |
@@ -611,8 +829,6 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
611 | bool ie_updated = false; | 829 | bool ie_updated = false; |
612 | 830 | ||
613 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | 831 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { |
614 | if (rsv->expired) | ||
615 | uwb_drp_handle_timeout(rsv); | ||
616 | if (!rsv->ie_valid) { | 832 | if (!rsv->ie_valid) { |
617 | uwb_drp_ie_update(rsv); | 833 | uwb_drp_ie_update(rsv); |
618 | ie_updated = true; | 834 | ie_updated = true; |
@@ -622,9 +838,47 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
622 | return ie_updated; | 838 | return ie_updated; |
623 | } | 839 | } |
624 | 840 | ||
841 | void uwb_rsv_queue_update(struct uwb_rc *rc) | ||
842 | { | ||
843 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
844 | |||
845 | queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); | ||
846 | } | ||
847 | |||
848 | /** | ||
849 | * uwb_rsv_sched_update - schedule an update of the DRP IEs | ||
850 | * @rc: the radio controller. | ||
851 | * | ||
852 | * To improve performance and ensure correctness with [ECMA-368] the | ||
853 | * number of SET-DRP-IE commands that are issued is limited. | ||
854 | * | ||
855 | * DRP IE updates come from two sources: DRP events from the hardware, | ||
856 | * which all occur at the beginning of the superframe ('synchronous' | ||
857 | * events) and reservation establishment/termination requests from | ||
858 | * PALs or timers ('asynchronous' events). | ||
859 | * | ||
860 | * A delayed work ensures that all the synchronous events result in | ||
861 | * one SET-DRP-IE command. | ||
862 | * | ||
863 | * Additional logic (the set_drp_ie_pending and rsv_updated_postponed | ||
864 | * flags) will prevent an asynchronous event from starting a SET-DRP-IE | ||
865 | * command if one is currently awaiting a response. | ||
866 | * | ||
867 | * FIXME: this does leave a window where an asynchronous event can delay | ||
868 | * the SET-DRP-IE for a synchronous event by one superframe. | ||
869 | */ | ||
625 | void uwb_rsv_sched_update(struct uwb_rc *rc) | 870 | void uwb_rsv_sched_update(struct uwb_rc *rc) |
626 | { | 871 | { |
627 | queue_work(rc->rsv_workq, &rc->rsv_update_work); | 872 | spin_lock(&rc->rsvs_lock); |
873 | if (!delayed_work_pending(&rc->rsv_update_work)) { | ||
874 | if (rc->set_drp_ie_pending > 0) { | ||
875 | rc->set_drp_ie_pending++; | ||
876 | goto unlock; | ||
877 | } | ||
878 | uwb_rsv_queue_update(rc); | ||
879 | } | ||
880 | unlock: | ||
881 | spin_unlock(&rc->rsvs_lock); | ||
628 | } | 882 | } |
629 | 883 | ||
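The scheduling policy described above (coalesce synchronous events into one SET-DRP-IE, and postpone asynchronous ones while a command is in flight) boils down to a counter plus a delayed work. A toy userspace model of just that idea; the completion side is not part of this hunk, so the command_done() half is an assumption made for illustration only.

    #include <stdio.h>
    #include <stdbool.h>

    static int  set_drp_ie_pending;    /* >0 while a command is outstanding */
    static bool update_queued;

    static void sched_update(void)
    {
            if (update_queued)
                    return;                         /* already coalesced */
            if (set_drp_ie_pending > 0) {
                    set_drp_ie_pending++;           /* remember a postponed update */
                    return;
            }
            update_queued = true;
            printf("SET-DRP-IE update queued\n");
    }

    static void command_done(void)
    {
            if (--set_drp_ie_pending > 0) {         /* postponed requests arrived */
                    set_drp_ie_pending = 0;
                    sched_update();                 /* issue one catch-up update */
            }
    }

    int main(void)
    {
            sched_update();          /* no command in flight: queued at once */
            update_queued = false;   /* pretend the delayed work ran ... */
            set_drp_ie_pending = 1;  /* ... and a SET-DRP-IE is now in flight */
            sched_update();          /* only bumps the counter */
            sched_update();
            command_done();          /* completion triggers one catch-up update */
            return 0;
    }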
630 | /* | 884 | /* |
@@ -633,7 +887,8 @@ void uwb_rsv_sched_update(struct uwb_rc *rc) | |||
633 | */ | 887 | */ |
634 | static void uwb_rsv_update_work(struct work_struct *work) | 888 | static void uwb_rsv_update_work(struct work_struct *work) |
635 | { | 889 | { |
636 | struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); | 890 | struct uwb_rc *rc = container_of(work, struct uwb_rc, |
891 | rsv_update_work.work); | ||
637 | bool ie_updated; | 892 | bool ie_updated; |
638 | 893 | ||
639 | mutex_lock(&rc->rsvs_mutex); | 894 | mutex_lock(&rc->rsvs_mutex); |
@@ -645,18 +900,34 @@ static void uwb_rsv_update_work(struct work_struct *work) | |||
645 | ie_updated = true; | 900 | ie_updated = true; |
646 | } | 901 | } |
647 | 902 | ||
648 | if (ie_updated) | 903 | if (ie_updated && (rc->set_drp_ie_pending == 0)) |
649 | uwb_rc_send_all_drp_ie(rc); | 904 | uwb_rc_send_all_drp_ie(rc); |
650 | 905 | ||
651 | mutex_unlock(&rc->rsvs_mutex); | 906 | mutex_unlock(&rc->rsvs_mutex); |
652 | } | 907 | } |
653 | 908 | ||
909 | static void uwb_rsv_alien_bp_work(struct work_struct *work) | ||
910 | { | ||
911 | struct uwb_rc *rc = container_of(work, struct uwb_rc, | ||
912 | rsv_alien_bp_work.work); | ||
913 | struct uwb_rsv *rsv; | ||
914 | |||
915 | mutex_lock(&rc->rsvs_mutex); | ||
916 | |||
917 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
918 | if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { | ||
919 | rsv->callback(rsv); | ||
920 | } | ||
921 | } | ||
922 | |||
923 | mutex_unlock(&rc->rsvs_mutex); | ||
924 | } | ||
925 | |||
654 | static void uwb_rsv_timer(unsigned long arg) | 926 | static void uwb_rsv_timer(unsigned long arg) |
655 | { | 927 | { |
656 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; | 928 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; |
657 | 929 | ||
658 | rsv->expired = true; | 930 | queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); |
659 | uwb_rsv_sched_update(rsv->rc); | ||
660 | } | 931 | } |
661 | 932 | ||
662 | /** | 933 | /** |
@@ -673,16 +944,27 @@ void uwb_rsv_remove_all(struct uwb_rc *rc) | |||
673 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | 944 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { |
674 | uwb_rsv_remove(rsv); | 945 | uwb_rsv_remove(rsv); |
675 | } | 946 | } |
947 | /* Cancel any postponed update. */ | ||
948 | rc->set_drp_ie_pending = 0; | ||
676 | mutex_unlock(&rc->rsvs_mutex); | 949 | mutex_unlock(&rc->rsvs_mutex); |
677 | 950 | ||
678 | cancel_work_sync(&rc->rsv_update_work); | 951 | cancel_delayed_work_sync(&rc->rsv_update_work); |
679 | } | 952 | } |
680 | 953 | ||
681 | void uwb_rsv_init(struct uwb_rc *rc) | 954 | void uwb_rsv_init(struct uwb_rc *rc) |
682 | { | 955 | { |
683 | INIT_LIST_HEAD(&rc->reservations); | 956 | INIT_LIST_HEAD(&rc->reservations); |
957 | INIT_LIST_HEAD(&rc->cnflt_alien_list); | ||
684 | mutex_init(&rc->rsvs_mutex); | 958 | mutex_init(&rc->rsvs_mutex); |
685 | INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | 959 | spin_lock_init(&rc->rsvs_lock); |
960 | INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | ||
961 | INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); | ||
962 | rc->bow.can_reserve_extra_mases = true; | ||
963 | rc->bow.total_expired = 0; | ||
964 | rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; | ||
965 | init_timer(&rc->bow.timer); | ||
966 | rc->bow.timer.function = uwb_rsv_backoff_win_timer; | ||
967 | rc->bow.timer.data = (unsigned long)&rc->bow; | ||
686 | 968 | ||
687 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); | 969 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); |
688 | } | 970 | } |