Diffstat (limited to 'drivers/uwb')

 drivers/uwb/Makefile       |   1
 drivers/uwb/allocator.c    | 386
 drivers/uwb/drp-avail.c    |   4
 drivers/uwb/drp-ie.c       | 160
 drivers/uwb/drp.c          | 681
 drivers/uwb/rsv.c          | 482
 drivers/uwb/uwb-debug.c    |  49
 drivers/uwb/uwb-internal.h |  80
 8 files changed, 1527 insertions(+), 316 deletions(-)
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile
index ce21a95da04a..2f98d080fe78 100644
--- a/drivers/uwb/Makefile
+++ b/drivers/uwb/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_UWB_I1480U) += i1480/
 
 uwb-objs := \
 	address.o \
+	allocator.o \
 	beacon.o \
 	driver.o \
 	drp.o \
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
new file mode 100644
index 000000000000..c8185e6b0cd5
--- /dev/null
+++ b/drivers/uwb/allocator.c
@@ -0,0 +1,386 @@
+/*
+ * UWB reservation management.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/uwb.h>
+
+#include "uwb-internal.h"
+
+static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai)
+{
+	int col, mas, safe_mas, unsafe_mas;
+	unsigned char *bm = ai->bm;
+	struct uwb_rsv_col_info *ci = ai->ci;
+	unsigned char c;
+
+	for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) {
+
+		safe_mas   = ci->csi.safe_mas_per_col;
+		unsafe_mas = ci->csi.unsafe_mas_per_col;
+
+		for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
+			if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) {
+
+				if (safe_mas > 0) {
+					safe_mas--;
+					c = UWB_RSV_MAS_SAFE;
+				} else if (unsafe_mas > 0) {
+					unsafe_mas--;
+					c = UWB_RSV_MAS_UNSAFE;
+				} else {
+					break;
+				}
+				bm[col * UWB_MAS_PER_ZONE + mas] = c;
+			}
+		}
+	}
+}
+
+static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai)
+{
+	int mas, col, rows;
+	unsigned char *bm = ai->bm;
+	struct uwb_rsv_row_info *ri = &ai->ri;
+	unsigned char c;
+
+	rows = 1;
+	c = UWB_RSV_MAS_SAFE;
+	for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) {
+		if (ri->avail[mas] == 1) {
+
+			if (rows > ri->used_rows) {
+				break;
+			} else if (rows > 7) {
+				c = UWB_RSV_MAS_UNSAFE;
+			}
+
+			for (col = 0; col < UWB_NUM_ZONES; col++) {
+				if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) {
+					bm[col * UWB_NUM_ZONES + mas] = c;
+					if (c == UWB_RSV_MAS_SAFE)
+						ai->safe_allocated_mases++;
+					else
+						ai->unsafe_allocated_mases++;
+				}
+			}
+			rows++;
+		}
+	}
+	ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
+}
+
+/*
+ * Find the best column set for a given availability, interval, num safe mas and
+ * num unsafe mas.
+ *
+ * The different sets are tried in order as shown below, depending on the interval.
+ *
+ * interval = 16
+ *	deep = 0
+ *		set 1 -> { 8 }
+ *	deep = 1
+ *		set 1 -> { 4 }
+ *		set 2 -> { 12 }
+ *	deep = 2
+ *		set 1 -> { 2 }
+ *		set 2 -> { 6 }
+ *		set 3 -> { 10 }
+ *		set 4 -> { 14 }
+ *	deep = 3
+ *		set 1 -> { 1 }
+ *		set 2 -> { 3 }
+ *		set 3 -> { 5 }
+ *		set 4 -> { 7 }
+ *		set 5 -> { 9 }
+ *		set 6 -> { 11 }
+ *		set 7 -> { 13 }
+ *		set 8 -> { 15 }
+ *
+ * interval = 8
+ *	deep = 0
+ *		set 1 -> { 4 12 }
+ *	deep = 1
+ *		set 1 -> { 2 10 }
+ *		set 2 -> { 6 14 }
+ *	deep = 2
+ *		set 1 -> { 1 9 }
+ *		set 2 -> { 3 11 }
+ *		set 3 -> { 5 13 }
+ *		set 4 -> { 7 15 }
+ *
+ * interval = 4
+ *	deep = 0
+ *		set 1 -> { 2 6 10 14 }
+ *	deep = 1
+ *		set 1 -> { 1 5 9 13 }
+ *		set 2 -> { 3 7 11 15 }
+ *
+ * interval = 2
+ *	deep = 0
+ *		set 1 -> { 1 3 5 7 9 11 13 15 }
+ */
+static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval,
+					int num_safe_mas, int num_unsafe_mas)
+{
+	struct uwb_rsv_col_info *ci = ai->ci;
+	struct uwb_rsv_col_set_info *csi = &ci->csi;
+	struct uwb_rsv_col_set_info tmp_csi;
+	int deep, set, col, start_col_deep, col_start_set;
+	int start_col, max_mas_in_set, lowest_max_mas_in_deep;
+	int n_mas;
+	int found = UWB_RSV_ALLOC_NOT_FOUND;
+
+	tmp_csi.start_col = 0;
+	start_col_deep = interval;
+	n_mas = num_unsafe_mas + num_safe_mas;
+
+	for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
+		start_col_deep /= 2;
+		col_start_set = 0;
+		lowest_max_mas_in_deep = UWB_MAS_PER_ZONE;
+
+		for (set = 1; set <= (1 << deep); set++) {
+			max_mas_in_set = 0;
+			start_col = start_col_deep + col_start_set;
+			for (col = start_col; col < UWB_NUM_ZONES; col += interval) {
+
+				if (ci[col].max_avail_safe >= num_safe_mas &&
+				    ci[col].max_avail_unsafe >= n_mas) {
+					if (ci[col].highest_mas[n_mas] > max_mas_in_set)
+						max_mas_in_set = ci[col].highest_mas[n_mas];
+				} else {
+					max_mas_in_set = 0;
+					break;
+				}
+			}
+			if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) {
+				lowest_max_mas_in_deep = max_mas_in_set;
+
+				tmp_csi.start_col = start_col;
+			}
+			col_start_set += (interval >> deep);
+		}
+
+		if (lowest_max_mas_in_deep < 8) {
+			csi->start_col = tmp_csi.start_col;
+			found = UWB_RSV_ALLOC_FOUND;
+			break;
+		} else if ((lowest_max_mas_in_deep > 8) &&
+			   (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) &&
+			   (found == UWB_RSV_ALLOC_NOT_FOUND)) {
+			csi->start_col = tmp_csi.start_col;
+			found = UWB_RSV_ALLOC_FOUND;
+		}
+	}
+
+	if (found == UWB_RSV_ALLOC_FOUND) {
+		csi->interval = interval;
+		csi->safe_mas_per_col = num_safe_mas;
+		csi->unsafe_mas_per_col = num_unsafe_mas;
+
+		ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas;
+		ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas;
+		ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
+		ai->interval = interval;
+	}
+	return found;
+}
+
+static void get_row_descriptors(struct uwb_rsv_alloc_info *ai)
+{
+	unsigned char *bm = ai->bm;
+	struct uwb_rsv_row_info *ri = &ai->ri;
+	int col, mas;
+
+	ri->free_rows = 16;
+	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
+		ri->avail[mas] = 1;
+		for (col = 1; col < UWB_NUM_ZONES; col++) {
+			if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) {
+				ri->free_rows--;
+				ri->avail[mas] = 0;
+				break;
+			}
+		}
+	}
+}
+
+static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci)
+{
+	int mas;
+	int block_count = 0, start_block = 0;
+	int previous_avail = 0;
+	int available = 0;
+	int safe_mas_in_row[UWB_MAS_PER_ZONE] = {
+		8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
+	};
+
+	rci->max_avail_safe = 0;
+
+	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
+		if (!bm[column * UWB_NUM_ZONES + mas]) {
+			available++;
+			rci->max_avail_unsafe = available;
+
+			rci->highest_mas[available] = mas;
+
+			if (previous_avail) {
+				block_count++;
+				if ((block_count > safe_mas_in_row[start_block]) &&
+				    (!rci->max_avail_safe))
+					rci->max_avail_safe = available - 1;
+			} else {
+				previous_avail = 1;
+				start_block = mas;
+				block_count = 1;
+			}
+		} else {
+			previous_avail = 0;
+		}
+	}
+	if (!rci->max_avail_safe)
+		rci->max_avail_safe = rci->max_avail_unsafe;
+}
+
+static void get_column_descriptors(struct uwb_rsv_alloc_info *ai)
+{
+	unsigned char *bm = ai->bm;
+	struct uwb_rsv_col_info *ci = ai->ci;
+	int col;
+
+	for (col = 1; col < UWB_NUM_ZONES; col++) {
+		uwb_rsv_fill_column_info(bm, col, &ci[col]);
+	}
+}
+
+static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai)
+{
+	int n_rows;
+	int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW;
+	int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW;
+	if (ai->min_mas % UWB_USABLE_MAS_PER_ROW)
+		min_rows++;
+	for (n_rows = max_rows; n_rows >= min_rows; n_rows--) {
+		if (n_rows <= ai->ri.free_rows) {
+			ai->ri.used_rows = n_rows;
+			ai->interval = 1; /* row reservation */
+			uwb_rsv_fill_row_alloc(ai);
+			return UWB_RSV_ALLOC_FOUND;
+		}
+	}
+	return UWB_RSV_ALLOC_NOT_FOUND;
+}
+
+static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval)
+{
+	int n_safe, n_unsafe, n_mas;
+	int n_column = UWB_NUM_ZONES / interval;
+	int max_per_zone = ai->max_mas / n_column;
+	int min_per_zone = ai->min_mas / n_column;
+
+	if (ai->min_mas % n_column)
+		min_per_zone++;
+
+	if (min_per_zone > UWB_MAS_PER_ZONE) {
+		return UWB_RSV_ALLOC_NOT_FOUND;
+	}
+
+	if (max_per_zone > UWB_MAS_PER_ZONE) {
+		max_per_zone = UWB_MAS_PER_ZONE;
+	}
+
+	for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) {
+		if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND)
+			continue;
+		for (n_safe = n_mas; n_safe >= 0; n_safe--) {
+			n_unsafe = n_mas - n_safe;
+			if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) {
+				uwb_rsv_fill_column_alloc(ai);
+				return UWB_RSV_ALLOC_FOUND;
+			}
+		}
+	}
+	return UWB_RSV_ALLOC_NOT_FOUND;
+}
+
+int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available,
+				 struct uwb_mas_bm *result)
+{
+	struct uwb_rsv_alloc_info *ai;
+	int interval;
+	int bit_index;
+
+	ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
+
+	ai->min_mas = rsv->min_mas;
+	ai->max_mas = rsv->max_mas;
+	ai->max_interval = rsv->max_interval;
+
+
+	/* fill the not available vector from the available bm */
+	for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) {
+		if (!test_bit(bit_index, available->bm))
+			ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL;
+	}
+
+	if (ai->max_interval == 1) {
+		get_row_descriptors(ai);
+		if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
+			goto alloc_found;
+		else
+			goto alloc_not_found;
+	}
+
+	get_column_descriptors(ai);
+
+	for (interval = 16; interval >= 2; interval >>= 1) {
+		if (interval > ai->max_interval)
+			continue;
+		if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND)
+			goto alloc_found;
+	}
+
+	/* try row reservation if no column is found */
+	get_row_descriptors(ai);
+	if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
+		goto alloc_found;
+	else
+		goto alloc_not_found;
+
+alloc_found:
+	bitmap_zero(result->bm, UWB_NUM_MAS);
+	bitmap_zero(result->unsafe_bm, UWB_NUM_MAS);
+	/* fill the safe and unsafe bitmaps */
+	for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) {
+		if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE)
+			set_bit(bit_index, result->bm);
+		else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE)
+			set_bit(bit_index, result->unsafe_bm);
+	}
+	bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS);
+
+	result->safe   = ai->safe_allocated_mases;
+	result->unsafe = ai->unsafe_allocated_mases;
+
+	kfree(ai);
+	return UWB_RSV_ALLOC_FOUND;
+
+alloc_not_found:
+	kfree(ai);
+	return UWB_RSV_ALLOC_NOT_FOUND;
+}
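
The allocator above treats the 256 MAS of a superframe as a 16x16 grid: 16 zones (the "columns") of 16 MAS each (the "rows"), matching UWB_NUM_ZONES and UWB_MAS_PER_ZONE. As a rough standalone illustration of that indexing (a sketch for this note only, not kernel code):

#include <stdio.h>

#define NUM_ZONES    16
#define MAS_PER_ZONE 16

int main(void)
{
	int mas = 200;                  /* a MAS index in [0, 255] */
	int zone = mas / MAS_PER_ZONE;  /* column in the allocator's bitmap */
	int slot = mas % MAS_PER_ZONE;  /* row within the zone */
	printf("MAS %d -> zone %d, slot %d\n", mas, zone, slot);
	return 0;
}

A column reservation marks the same slots in every zone of the chosen set; a row reservation marks one slot across all zones, which is why the two search strategies above index the bitmap as bm[col * 16 + mas].
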
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c
index 3febd8552808..40a540a5a72e 100644
--- a/drivers/uwb/drp-avail.c
+++ b/drivers/uwb/drp-avail.c
@@ -58,7 +58,7 @@ void uwb_drp_avail_init(struct uwb_rc *rc)
  *
  * avail = global & local & pending
  */
-static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
+void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
 {
 	bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
 	bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
@@ -105,6 +105,7 @@ void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
 	bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
 	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
 	rc->drp_avail.ie_valid = false;
+	uwb_rsv_handle_drp_avail_change(rc);
 }
 
 /**
@@ -280,6 +281,7 @@ int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
 	mutex_lock(&rc->rsvs_mutex);
 	bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
 	rc->drp_avail.ie_valid = false;
+	uwb_rsv_handle_drp_avail_change(rc);
 	mutex_unlock(&rc->rsvs_mutex);
 
 	uwb_rsv_sched_update(rc);
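
uwb_drp_available() computes the usable MAS set as the intersection avail = global & local & pending. A minimal userspace sketch of the same rule over a 256-bit bitmap, with plain uint64_t words standing in for the kernel's bitmap_and():

#include <stdint.h>
#include <stdio.h>

#define MAS_WORDS 4 /* 256 bits / 64 */

static void mas_and(uint64_t *dst, const uint64_t *a, const uint64_t *b)
{
	for (int i = 0; i < MAS_WORDS; i++)
		dst[i] = a[i] & b[i];
}

int main(void)
{
	uint64_t global[MAS_WORDS]  = { ~0ull, ~0ull, ~0ull, ~0ull };
	uint64_t local[MAS_WORDS]   = { 0xffffull, 0, 0, 0 };
	uint64_t pending[MAS_WORDS] = { 0xff00ull, 0, 0, 0 };
	uint64_t avail[MAS_WORDS];

	mas_and(avail, global, local);
	mas_and(avail, avail, pending);
	printf("first word: %#llx\n", (unsigned long long)avail[0]); /* 0xff00 */
	return 0;
}
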
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
index 75491d47806b..2840d7bf9e67 100644
--- a/drivers/uwb/drp-ie.c
+++ b/drivers/uwb/drp-ie.c
@@ -22,6 +22,96 @@
 
 #include "uwb-internal.h"
 
+
+/*
+ * Return the reason code for a reservation's DRP IE.
+ */
+int uwb_rsv_reason_code(struct uwb_rsv *rsv)
+{
+	static const int reason_codes[] = {
+		[UWB_RSV_STATE_O_INITIATED]          = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_O_PENDING]            = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_O_MODIFIED]           = UWB_DRP_REASON_MODIFIED,
+		[UWB_RSV_STATE_O_ESTABLISHED]        = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_O_TO_BE_MOVED]        = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_O_MOVE_COMBINING]     = UWB_DRP_REASON_MODIFIED,
+		[UWB_RSV_STATE_O_MOVE_REDUCING]      = UWB_DRP_REASON_MODIFIED,
+		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_T_ACCEPTED]           = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_T_CONFLICT]           = UWB_DRP_REASON_CONFLICT,
+		[UWB_RSV_STATE_T_PENDING]            = UWB_DRP_REASON_PENDING,
+		[UWB_RSV_STATE_T_DENIED]             = UWB_DRP_REASON_DENIED,
+		[UWB_RSV_STATE_T_RESIZED]            = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
+		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
+		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
+	};
+
+	return reason_codes[rsv->state];
+}
+
+/*
+ * Return the reason code for a reservation's companion DRP IE.
+ */
+int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
+{
+	static const int companion_reason_codes[] = {
+		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
+		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
+		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
+		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
+	};
+
+	return companion_reason_codes[rsv->state];
+}
+
+/*
+ * Return the status bit for a reservation's DRP IE.
+ */
+int uwb_rsv_status(struct uwb_rsv *rsv)
+{
+	static const int statuses[] = {
+		[UWB_RSV_STATE_O_INITIATED]          = 0,
+		[UWB_RSV_STATE_O_PENDING]            = 0,
+		[UWB_RSV_STATE_O_MODIFIED]           = 1,
+		[UWB_RSV_STATE_O_ESTABLISHED]        = 1,
+		[UWB_RSV_STATE_O_TO_BE_MOVED]        = 0,
+		[UWB_RSV_STATE_O_MOVE_COMBINING]     = 1,
+		[UWB_RSV_STATE_O_MOVE_REDUCING]      = 1,
+		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = 1,
+		[UWB_RSV_STATE_T_ACCEPTED]           = 1,
+		[UWB_RSV_STATE_T_CONFLICT]           = 0,
+		[UWB_RSV_STATE_T_PENDING]            = 0,
+		[UWB_RSV_STATE_T_DENIED]             = 0,
+		[UWB_RSV_STATE_T_RESIZED]            = 1,
+		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
+		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1,
+		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = 1,
+		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = 1,
+
+	};
+
+	return statuses[rsv->state];
+}
+
+/*
+ * Return the status bit for a reservation's companion DRP IE.
+ */
+int uwb_rsv_companion_status(struct uwb_rsv *rsv)
+{
+	static const int companion_statuses[] = {
+		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = 0,
+		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
+		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
+		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = 0,
+		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = 0,
+	};
+
+	return companion_statuses[rsv->state];
+}
+
 /*
  * Allocate a DRP IE.
  *
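
The four helpers added above replace per-state switch statements with designated-initializer lookup tables indexed by the reservation state. A self-contained sketch of the pattern, using hypothetical enums rather than the kernel's:

#include <stdio.h>

enum state  { ST_PENDING, ST_ACCEPTED, ST_DENIED, ST_MAX };
enum reason { R_PENDING, R_ACCEPTED, R_DENIED };

/* one table entry per state; unlisted states default to 0 */
static const enum reason reason_for_state[ST_MAX] = {
	[ST_PENDING]  = R_PENDING,
	[ST_ACCEPTED] = R_ACCEPTED,
	[ST_DENIED]   = R_DENIED,
};

int main(void)
{
	printf("%d\n", reason_for_state[ST_ACCEPTED]); /* prints 1 */
	return 0;
}

The table form keeps the state-to-code mapping in one place, which matters here since the move/expand states added by this patch would otherwise bloat several switch statements.
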
@@ -33,16 +123,12 @@
 static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
 {
 	struct uwb_ie_drp *drp_ie;
-	unsigned tiebreaker;
 
 	drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
 			UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
 			GFP_KERNEL);
 	if (drp_ie) {
 		drp_ie->hdr.element_id = UWB_IE_DRP;
-
-		get_random_bytes(&tiebreaker, sizeof(unsigned));
-		uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1);
 	}
 	return drp_ie;
 }
@@ -103,43 +189,17 @@
  */
 int uwb_drp_ie_update(struct uwb_rsv *rsv)
 {
-	struct device *dev = &rsv->rc->uwb_dev.dev;
 	struct uwb_ie_drp *drp_ie;
-	int reason_code, status;
+	struct uwb_rsv_move *mv;
+	int unsafe;
 
-	switch (rsv->state) {
-	case UWB_RSV_STATE_NONE:
+	if (rsv->state == UWB_RSV_STATE_NONE) {
 		kfree(rsv->drp_ie);
 		rsv->drp_ie = NULL;
 		return 0;
-	case UWB_RSV_STATE_O_INITIATED:
-		reason_code = UWB_DRP_REASON_ACCEPTED;
-		status = 0;
-		break;
-	case UWB_RSV_STATE_O_PENDING:
-		reason_code = UWB_DRP_REASON_ACCEPTED;
-		status = 0;
-		break;
-	case UWB_RSV_STATE_O_MODIFIED:
-		reason_code = UWB_DRP_REASON_MODIFIED;
-		status = 1;
-		break;
-	case UWB_RSV_STATE_O_ESTABLISHED:
-		reason_code = UWB_DRP_REASON_ACCEPTED;
-		status = 1;
-		break;
-	case UWB_RSV_STATE_T_ACCEPTED:
-		reason_code = UWB_DRP_REASON_ACCEPTED;
-		status = 1;
-		break;
-	case UWB_RSV_STATE_T_DENIED:
-		reason_code = UWB_DRP_REASON_DENIED;
-		status = 0;
-		break;
-	default:
-		dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state);
-		return -EINVAL;
 	}
+
+	unsafe = rsv->mas.unsafe ? 1 : 0;
 
 	if (rsv->drp_ie == NULL) {
 		rsv->drp_ie = uwb_drp_ie_alloc();
@@ -148,9 +208,11 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv)
 	}
 	drp_ie = rsv->drp_ie;
 
+	uwb_ie_drp_set_unsafe(drp_ie, unsafe);
+	uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
 	uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
-	uwb_ie_drp_set_status(drp_ie, status);
-	uwb_ie_drp_set_reason_code(drp_ie, reason_code);
+	uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
+	uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
 	uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
 	uwb_ie_drp_set_type(drp_ie, rsv->type);
 
@@ -168,6 +230,27 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv)
 
 	uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
 
+	if (uwb_rsv_has_two_drp_ies(rsv)) {
+		mv = &rsv->mv;
+		if (mv->companion_drp_ie == NULL) {
+			mv->companion_drp_ie = uwb_drp_ie_alloc();
+			if (mv->companion_drp_ie == NULL)
+				return -ENOMEM;
+		}
+		drp_ie = mv->companion_drp_ie;
+
+		/* keep all the same configuration of the main drp_ie */
+		memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));
+
+
+		/* FIXME: handle the unsafe bit properly */
+		uwb_ie_drp_set_unsafe(drp_ie, 1);
+		uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
+		uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));
+
+		uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
+	}
+
 	rsv->ie_valid = true;
 	return 0;
 }
@@ -218,6 +301,8 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
 	u8 zone;
 	u16 zone_mask;
 
+	bitmap_zero(bm->bm, UWB_NUM_MAS);
+
 	for (cnt = 0; cnt < numallocs; cnt++) {
 		alloc = &drp_ie->allocs[cnt];
 		zone_bm = le16_to_cpu(alloc->zone_bm);
@@ -229,3 +314,4 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
 		}
 	}
 }
+
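
Each struct uwb_drp_alloc in a DRP IE carries a 16-bit zone bitmap and a 16-bit MAS bitmap; uwb_drp_ie_to_bm() expands each pair into the flat 256-bit MAS view, where a MAS is covered when both its zone bit and its in-zone bit are set. A standalone sketch of that expansion, assuming the ECMA-368 encoding:

#include <stdint.h>
#include <stdio.h>

#define NUM_ZONES    16
#define MAS_PER_ZONE 16

static void alloc_to_flat(uint16_t zone_bm, uint16_t mas_bm,
			  unsigned char flat[NUM_ZONES * MAS_PER_ZONE])
{
	for (int z = 0; z < NUM_ZONES; z++) {
		if (!(zone_bm & (1 << z)))
			continue;               /* zone not in this allocation */
		for (int m = 0; m < MAS_PER_ZONE; m++)
			if (mas_bm & (1 << m))  /* slot m of every listed zone */
				flat[z * MAS_PER_ZONE + m] = 1;
	}
}

int main(void)
{
	unsigned char flat[NUM_ZONES * MAS_PER_ZONE] = { 0 };

	alloc_to_flat(0x0002 /* zone 1 */, 0x00f0 /* MAS 4-7 */, flat);
	printf("MAS 20 allocated? %d\n", flat[20]); /* zone 1, slot 4 -> 1 */
	return 0;
}

This also shows why the bitmap_zero() added above matters: the expansion only ever sets bits, so a stale destination bitmap would leak MAS from a previous decode.
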
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index fe328146adb7..2b4f9406789d 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -23,6 +23,59 @@
 #include <linux/delay.h>
 #include "uwb-internal.h"
 
+
+/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
+enum uwb_drp_conflict_action {
+	/* Reservation is maintained, no action needed */
+	UWB_DRP_CONFLICT_MANTAIN = 0,
+
+	/* the device shall not transmit frames in conflicting MASs in
+	 * the following superframe. If the device is the reservation
+	 * target, it shall also set the Reason Code in its DRP IE to
+	 * Conflict in its beacon in the following superframe.
+	 */
+	UWB_DRP_CONFLICT_ACT1,
+
+	/* the device shall not set the Reservation Status bit to ONE
+	 * and shall not transmit frames in conflicting MASs. If the
+	 * device is the reservation target, it shall also set the
+	 * Reason Code in its DRP IE to Conflict.
+	 */
+	UWB_DRP_CONFLICT_ACT2,
+
+	/* the device shall not transmit frames in conflicting MASs in
+	 * the following superframe. It shall remove the conflicting
+	 * MASs from the reservation or set the Reservation Status to
+	 * ZERO in its beacon in the following superframe. If the
+	 * device is the reservation target, it shall also set the
+	 * Reason Code in its DRP IE to Conflict.
+	 */
+	UWB_DRP_CONFLICT_ACT3,
+};
+
+
+static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
+				    struct uwb_rceb *reply, ssize_t reply_size)
+{
+	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
+
+	if (r != NULL) {
+		if (r->bResultCode != UWB_RC_RES_SUCCESS)
+			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
+				uwb_rc_strerror(r->bResultCode), r->bResultCode);
+	} else
+		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
+
+	spin_lock(&rc->rsvs_lock);
+	if (rc->set_drp_ie_pending > 1) {
+		rc->set_drp_ie_pending = 0;
+		uwb_rsv_queue_update(rc);
+	} else {
+		rc->set_drp_ie_pending = 0;
+	}
+	spin_unlock(&rc->rsvs_lock);
+}
+
 /**
  * Construct and send the SET DRP IE
  *
@@ -46,18 +99,23 @@
 int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
 {
 	int result;
-	struct device *dev = &rc->uwb_dev.dev;
 	struct uwb_rc_cmd_set_drp_ie *cmd;
-	struct uwb_rc_evt_set_drp_ie reply;
 	struct uwb_rsv *rsv;
+	struct uwb_rsv_move *mv;
 	int num_bytes = 0;
 	u8 *IEDataptr;
 
 	result = -ENOMEM;
 	/* First traverse all reservations to determine memory needed. */
 	list_for_each_entry(rsv, &rc->reservations, rc_node) {
-		if (rsv->drp_ie != NULL)
+		if (rsv->drp_ie != NULL) {
 			num_bytes += rsv->drp_ie->hdr.length + 2;
+			if (uwb_rsv_has_two_drp_ies(rsv) &&
+			    (rsv->mv.companion_drp_ie != NULL)) {
+				mv = &rsv->mv;
+				num_bytes += mv->companion_drp_ie->hdr.length + 2;
+			}
+		}
 	}
 	num_bytes += sizeof(rc->drp_avail.ie);
 	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
@@ -68,109 +126,322 @@ int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
 	cmd->wIELength = num_bytes;
 	IEDataptr = (u8 *)&cmd->IEData[0];
 
+	/* FIXME: DRP avail IE is not always needed */
+	/* put DRP avail IE first */
+	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
+	IEDataptr += sizeof(struct uwb_ie_drp_avail);
+
 	/* Next traverse all reservations to place IEs in allocated memory. */
 	list_for_each_entry(rsv, &rc->reservations, rc_node) {
 		if (rsv->drp_ie != NULL) {
 			memcpy(IEDataptr, rsv->drp_ie,
 			       rsv->drp_ie->hdr.length + 2);
 			IEDataptr += rsv->drp_ie->hdr.length + 2;
+
+			if (uwb_rsv_has_two_drp_ies(rsv) &&
+			    (rsv->mv.companion_drp_ie != NULL)) {
+				mv = &rsv->mv;
+				memcpy(IEDataptr, mv->companion_drp_ie,
+				       mv->companion_drp_ie->hdr.length + 2);
+				IEDataptr += mv->companion_drp_ie->hdr.length + 2;
+			}
 		}
 	}
-	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
 
-	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
-	reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE;
-	result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb,
-			sizeof(*cmd) + num_bytes, &reply.rceb,
-			sizeof(reply));
-	if (result < 0)
-		goto error_cmd;
-	result = le16_to_cpu(reply.wRemainingSpace);
-	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
-		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution "
-			"failed: %s (%d). RemainingSpace in beacon "
-			"= %d\n", uwb_rc_strerror(reply.bResultCode),
-			reply.bResultCode, result);
-		result = -EIO;
-	} else {
-		dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon "
-			"= %d.\n", result);
-		result = 0;
-	}
-error_cmd:
+	result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes,
+				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
+				  uwb_rc_set_drp_cmd_done, NULL);
+
+	rc->set_drp_ie_pending = 1;
+
 	kfree(cmd);
 error:
 	return result;
 }
 
-void uwb_drp_handle_timeout(struct uwb_rsv *rsv)
+/*
+ * Evaluate the action to perform using conflict resolution rules
+ *
+ * Return a uwb_drp_conflict_action.
+ */
+static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
+				    struct uwb_rsv *rsv, int our_status)
 {
-	struct device *dev = &rsv->rc->uwb_dev.dev;
+	int our_tie_breaker = rsv->tiebreaker;
+	int our_type        = rsv->type;
+	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;
+
+	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
+	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
+	int ext_type        = uwb_ie_drp_type(ext_drp_ie);
+
+
+	/* [ECMA-368 2nd Edition] 17.4.6 */
+	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
+		return UWB_DRP_CONFLICT_MANTAIN;
+	}
 
-	dev_dbg(dev, "reservation timeout in state %s (%d)\n",
-		uwb_rsv_state_str(rsv->state), rsv->state);
+	/* [ECMA-368 2nd Edition] 17.4.6-1 */
+	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
+		return UWB_DRP_CONFLICT_MANTAIN;
+	}
 
-	switch (rsv->state) {
-	case UWB_RSV_STATE_O_INITIATED:
-		if (rsv->is_multicast) {
-			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
-			return;
-		}
-		break;
-	case UWB_RSV_STATE_O_ESTABLISHED:
-		if (rsv->is_multicast)
-			return;
-		break;
-	default:
-		break;
+	/* [ECMA-368 2nd Edition] 17.4.6-2 */
+	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
+		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
+		return UWB_DRP_CONFLICT_ACT1;
+	}
+
+	/* [ECMA-368 2nd Edition] 17.4.6-3 */
+	if (our_status == 0 && ext_status == 1) {
+		return UWB_DRP_CONFLICT_ACT2;
+	}
+
+	/* [ECMA-368 2nd Edition] 17.4.6-4 */
+	if (our_status == 1 && ext_status == 0) {
+		return UWB_DRP_CONFLICT_MANTAIN;
+	}
+
+	/* [ECMA-368 2nd Edition] 17.4.6-5a */
+	if (our_tie_breaker == ext_tie_breaker &&
+	    our_beacon_slot <  ext_beacon_slot) {
+		return UWB_DRP_CONFLICT_MANTAIN;
+	}
+
+	/* [ECMA-368 2nd Edition] 17.4.6-5b */
+	if (our_tie_breaker != ext_tie_breaker &&
+	    our_beacon_slot >  ext_beacon_slot) {
+		return UWB_DRP_CONFLICT_MANTAIN;
+	}
+
+	if (our_status == 0) {
+		if (our_tie_breaker == ext_tie_breaker) {
+			/* [ECMA-368 2nd Edition] 17.4.6-6a */
+			if (our_beacon_slot > ext_beacon_slot) {
+				return UWB_DRP_CONFLICT_ACT2;
+			}
+		} else {
+			/* [ECMA-368 2nd Edition] 17.4.6-6b */
+			if (our_beacon_slot < ext_beacon_slot) {
+				return UWB_DRP_CONFLICT_ACT2;
+			}
+		}
+	} else {
+		if (our_tie_breaker == ext_tie_breaker) {
+			/* [ECMA-368 2nd Edition] 17.4.6-7a */
+			if (our_beacon_slot > ext_beacon_slot) {
+				return UWB_DRP_CONFLICT_ACT3;
+			}
+		} else {
+			/* [ECMA-368 2nd Edition] 17.4.6-7b */
+			if (our_beacon_slot < ext_beacon_slot) {
+				return UWB_DRP_CONFLICT_ACT3;
+			}
+		}
+	}
+	return UWB_DRP_CONFLICT_MANTAIN;
+}
+
+static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
+				   int ext_beacon_slot,
+				   struct uwb_rsv *rsv,
+				   struct uwb_mas_bm *conflicting_mas)
+{
+	struct uwb_rc *rc = rsv->rc;
+	struct uwb_rsv_move *mv = &rsv->mv;
+	struct uwb_drp_backoff_win *bow = &rc->bow;
+	int action;
+
+	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));
+
+	if (uwb_rsv_is_owner(rsv)) {
+		switch (action) {
+		case UWB_DRP_CONFLICT_ACT2:
+			/* try move */
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
+			if (bow->can_reserve_extra_mases == false)
+				uwb_rsv_backoff_win_increment(rc);
+
+			break;
+		case UWB_DRP_CONFLICT_ACT3:
+			uwb_rsv_backoff_win_increment(rc);
+			/* drop some mases with reason modified */
+			/* put in the companion the mases to be dropped */
+			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
+		default:
+			break;
+		}
+	} else {
+		switch (action) {
+		case UWB_DRP_CONFLICT_ACT2:
+		case UWB_DRP_CONFLICT_ACT3:
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
+		default:
+			break;
+		}
+
+	}
+
+}
+
+static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
+				      struct uwb_rsv *rsv, bool companion_only,
+				      struct uwb_mas_bm *conflicting_mas)
+{
+	struct uwb_rc *rc = rsv->rc;
+	struct uwb_drp_backoff_win *bow = &rc->bow;
+	struct uwb_rsv_move *mv = &rsv->mv;
+	int action;
+
+	if (companion_only) {
+		/* status of companion is 0 at this point */
+		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
+		if (uwb_rsv_is_owner(rsv)) {
+			switch (action) {
+			case UWB_DRP_CONFLICT_ACT2:
+			case UWB_DRP_CONFLICT_ACT3:
+				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+				rsv->needs_release_companion_mas = false;
+				if (bow->can_reserve_extra_mases == false)
+					uwb_rsv_backoff_win_increment(rc);
+				uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
+			}
+		} else { /* rsv is target */
+			switch (action) {
+			case UWB_DRP_CONFLICT_ACT2:
+			case UWB_DRP_CONFLICT_ACT3:
+				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT);
+				/* send_drp_avail_ie = true; */
+			}
+		}
+	} else { /* also base part of the reservation is conflicting */
+		if (uwb_rsv_is_owner(rsv)) {
+			uwb_rsv_backoff_win_increment(rc);
+			/* remove companion part */
+			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
+
+			/* drop some mases with reason modified */
+
+			/* put in the companion the mases to be dropped */
+			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
+		} else { /* it is a target rsv */
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
+			/* send_drp_avail_ie = true; */
+		}
+	}
+}
+
+static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
+					struct uwb_rc_evt_drp *drp_evt,
+					struct uwb_ie_drp *drp_ie,
+					struct uwb_mas_bm *conflicting_mas)
+{
+	struct uwb_rsv_move *mv;
+
+	/* check if the conflicting reservation has two drp_ies */
+	if (uwb_rsv_has_two_drp_ies(rsv)) {
+		mv = &rsv->mv;
+		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
+			handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
+						  rsv, false, conflicting_mas);
+		} else {
+			if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
+				handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
+							  rsv, true, conflicting_mas);
+			}
+		}
+	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
+		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas);
 	}
-	uwb_rsv_remove(rsv);
 }
 
+static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
+					    struct uwb_rc_evt_drp *drp_evt,
+					    struct uwb_ie_drp *drp_ie,
+					    struct uwb_mas_bm *conflicting_mas)
+{
+	struct uwb_rsv *rsv;
+
+	list_for_each_entry(rsv, &rc->reservations, rc_node) {
+		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas);
+	}
+}
+
 /*
  * Based on the DRP IE, transition a target reservation to a new
  * state.
  */
 static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
-				   struct uwb_ie_drp *drp_ie)
+				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
 {
 	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rsv_move *mv = &rsv->mv;
 	int status;
 	enum uwb_drp_reason reason_code;
-
+	struct uwb_mas_bm mas;
+
 	status = uwb_ie_drp_status(drp_ie);
 	reason_code = uwb_ie_drp_reason_code(drp_ie);
+	uwb_drp_ie_to_bm(&mas, drp_ie);
 
-	if (status) {
-		switch (reason_code) {
-		case UWB_DRP_REASON_ACCEPTED:
-			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
-			break;
-		case UWB_DRP_REASON_MODIFIED:
-			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
-				reason_code, status);
+	switch (reason_code) {
+	case UWB_DRP_REASON_ACCEPTED:
+
+		if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
 			break;
-		default:
-			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
-				 reason_code, status);
 		}
-	} else {
-		switch (reason_code) {
-		case UWB_DRP_REASON_ACCEPTED:
-			/* New reservations are handled in uwb_rsv_find(). */
-			break;
-		case UWB_DRP_REASON_DENIED:
-			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
-			break;
-		case UWB_DRP_REASON_CONFLICT:
-		case UWB_DRP_REASON_MODIFIED:
-			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
-				reason_code, status);
+
+		if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
+			/* drp_ie is companion */
+			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS))
+				/* stroke companion */
+				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
+		} else {
+			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
+				if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) {
+					/* FIXME: there is a conflict, find
+					 * the conflicting reservations and
+					 * take a sensible action. Consider
+					 * that in drp_ie there is the
+					 * "neighbour" */
+					uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
+				} else {
+					/* accept the extra reservation */
+					bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS);
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
+				}
+			} else {
+				if (status) {
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
+				}
+			}
+
+		}
+		break;
+
+	case UWB_DRP_REASON_MODIFIED:
+		/* check to see if we have already modified the reservation */
+		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
 			break;
-		default:
-			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
-				 reason_code, status);
 		}
+
+		/* find if the owner wants to expand or reduce */
+		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
+			/* owner is reducing */
+			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS);
+			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
+		}
+
+		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
+		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
+		break;
+	default:
+		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+			 reason_code, status);
 	}
 }
 
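
The tie-breaker comparisons in evaluate_conflict_action() follow [ECMA-368 2nd Edition] 17.4.6-5a/5b: with equal tie-breaker bits, the reservation whose owner occupies the lower beacon slot is kept; with differing bits, the higher slot wins. A compact standalone sketch of just that rule (simplified; the function above also weighs DRP type and reservation status first):

#include <stdbool.h>
#include <stdio.h>

static bool keep_reservation(int our_tb, int our_slot, int ext_tb, int ext_slot)
{
	if (our_tb == ext_tb)
		return our_slot < ext_slot; /* 17.4.6-5a */
	return our_slot > ext_slot;         /* 17.4.6-5b */
}

int main(void)
{
	/* same tie-breaker, lower beacon slot: maintained */
	printf("%d\n", keep_reservation(1, 3, 1, 9));
	/* different tie-breaker, higher beacon slot: maintained */
	printf("%d\n", keep_reservation(0, 9, 1, 3));
	return 0;
}

The point of the random tie-breaker bit is symmetry breaking: two devices seeing the same conflict reach opposite conclusions, so exactly one of them backs off.
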
@@ -179,23 +450,60 @@ static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
  * state.
  */
 static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
-				  struct uwb_ie_drp *drp_ie)
+				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
+				  struct uwb_rc_evt_drp *drp_evt)
 {
 	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_rsv_move *mv = &rsv->mv;
 	int status;
 	enum uwb_drp_reason reason_code;
+	struct uwb_mas_bm mas;
 
 	status = uwb_ie_drp_status(drp_ie);
 	reason_code = uwb_ie_drp_reason_code(drp_ie);
+	uwb_drp_ie_to_bm(&mas, drp_ie);
 
 	if (status) {
 		switch (reason_code) {
 		case UWB_DRP_REASON_ACCEPTED:
-			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
-			break;
-		case UWB_DRP_REASON_MODIFIED:
-			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
-				reason_code, status);
+			switch (rsv->state) {
+			case UWB_RSV_STATE_O_PENDING:
+			case UWB_RSV_STATE_O_INITIATED:
+			case UWB_RSV_STATE_O_ESTABLISHED:
+				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+				break;
+			case UWB_RSV_STATE_O_MODIFIED:
+				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+				} else {
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
+				}
+				break;
+
+			case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
+				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+				} else {
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
+				}
+				break;
+			case UWB_RSV_STATE_O_MOVE_EXPANDING:
+				if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
+					/* Companion reservation accepted */
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
+				} else {
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
+				}
+				break;
+			case UWB_RSV_STATE_O_MOVE_COMBINING:
+				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
+				else
+					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
+				break;
+			default:
+				break;
+			}
 			break;
 		default:
 			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
@@ -210,9 +518,10 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
 			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
 			break;
 		case UWB_DRP_REASON_CONFLICT:
-		case UWB_DRP_REASON_MODIFIED:
-			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
-				reason_code, status);
+			/* resolve the conflict */
+			bitmap_complement(mas.bm, src->last_availability_bm,
+					  UWB_NUM_MAS);
+			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
 			break;
 		default:
 			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
@@ -221,12 +530,110 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
 	}
 }
 
+static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
+{
+	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
+	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
+}
+
+static void uwb_cnflt_update_work(struct work_struct *work)
+{
+	struct uwb_cnflt_alien *cnflt = container_of(work,
+						     struct uwb_cnflt_alien,
+						     cnflt_update_work);
+	struct uwb_cnflt_alien *c;
+	struct uwb_rc *rc = cnflt->rc;
+
+	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
+
+	mutex_lock(&rc->rsvs_mutex);
+
+	list_del(&cnflt->rc_node);
+
+	/* update rc global conflicting alien bitmap */
+	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
+
+	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
+		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS);
+	}
+
+	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));
+
+	kfree(cnflt);
+	mutex_unlock(&rc->rsvs_mutex);
+}
+
+static void uwb_cnflt_timer(unsigned long arg)
+{
+	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
+
+	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
+}
+
 /*
- * Process a received DRP IE, it's either for a reservation owned by
- * the RC or targeted at it (or it's for a WUSB cluster reservation).
+ * We have received a DRP IE of type Alien BP and we need to make
+ * sure we do not transmit in conflicting MASs.
  */
-static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src,
-			    struct uwb_ie_drp *drp_ie)
+static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
+{
+	struct device *dev = &rc->uwb_dev.dev;
+	struct uwb_mas_bm mas;
+	struct uwb_cnflt_alien *cnflt;
+	char buf[72];
+	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
+
+	uwb_drp_ie_to_bm(&mas, drp_ie);
+	bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);
+
+	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
+		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
+			/* Existing alien BP reservation conflicting
+			 * bitmap, just reset the timer */
+			uwb_cnflt_alien_stroke_timer(cnflt);
+			return;
+		}
+	}
+
+	/* New alien BP reservation conflicting bitmap */
+
+	/* alloc and initialize new uwb_cnflt_alien */
+	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
+	if (!cnflt)
+		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
+	INIT_LIST_HEAD(&cnflt->rc_node);
+	init_timer(&cnflt->timer);
+	cnflt->timer.function = uwb_cnflt_timer;
+	cnflt->timer.data     = (unsigned long)cnflt;
+
+	cnflt->rc = rc;
+	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
+
+	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);
+
+	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);
+
+	/* update rc global conflicting alien bitmap */
+	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);
+
+	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));
+
+	/* start the timer */
+	uwb_cnflt_alien_stroke_timer(cnflt);
+}
+
+static void uwb_drp_process_not_involved(struct uwb_rc *rc,
+					 struct uwb_rc_evt_drp *drp_evt,
+					 struct uwb_ie_drp *drp_ie)
+{
+	struct uwb_mas_bm mas;
+
+	uwb_drp_ie_to_bm(&mas, drp_ie);
+	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
+}
+
+static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
+				     struct uwb_rc_evt_drp *drp_evt,
+				     struct uwb_ie_drp *drp_ie)
 {
 	struct uwb_rsv *rsv;
 
@@ -239,7 +646,7 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src,
 	 */
 		return;
 	}
-	
+
 	/*
 	 * Do nothing with DRP IEs for reservations that have been
 	 * terminated.
@@ -248,13 +655,43 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src,
 		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
 		return;
 	}
 
 	if (uwb_ie_drp_owner(drp_ie))
-		uwb_drp_process_target(rc, rsv, drp_ie);
+		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
+	else
+		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
+
+}
+
+
+static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
+{
+	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
+}
+
+/*
+ * Process a received DRP IE.
+ */
+static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
+			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
+{
+	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
+		uwb_drp_handle_alien_drp(rc, drp_ie);
+	else if (uwb_drp_involves_us(rc, drp_ie))
+		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
 	else
-		uwb_drp_process_owner(rc, rsv, drp_ie);
+		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
 }
 
+/*
+ * Process a received DRP Availability IE
+ */
+static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
+					 struct uwb_ie_drp_avail *drp_availability_ie)
+{
+	bitmap_copy(src->last_availability_bm,
+		    drp_availability_ie->bmp, UWB_NUM_MAS);
+}
 
 /*
  * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
@@ -276,10 +713,10 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
 
 		switch (ie_hdr->element_id) {
 		case UWB_IE_DRP_AVAILABILITY:
-			/* FIXME: does something need to be done with this? */
+			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
 			break;
 		case UWB_IE_DRP:
-			uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr);
+			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
 			break;
 		default:
 			dev_warn(dev, "unexpected IE in DRP notification\n");
@@ -292,55 +729,6 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
 		(int)ielen);
 }
 
-
-/*
- * Go through all the DRP IEs and find the ones that conflict with our
- * reservations.
- *
- * FIXME: must resolve the conflict according to the rules in
- * [ECMA-368].
- */
-static
-void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
-				  size_t ielen, struct uwb_dev *src_dev)
-{
-	struct device *dev = &rc->uwb_dev.dev;
-	struct uwb_ie_hdr *ie_hdr;
-	struct uwb_ie_drp *drp_ie;
-	void *ptr;
-
-	ptr = drp_evt->ie_data;
-	for (;;) {
-		ie_hdr = uwb_ie_next(&ptr, &ielen);
-		if (!ie_hdr)
-			break;
-
-		drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr);
-
-		/* FIXME: check if this DRP IE conflicts. */
-	}
-
-	if (ielen > 0)
-		dev_warn(dev, "%d octets remaining in DRP notification\n",
-			 (int)ielen);
-}
-
-
-/*
- * Terminate all reservations owned by, or targeted at, 'uwb_dev'.
- */
-static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
-{
-	struct uwb_rsv *rsv;
-
-	list_for_each_entry(rsv, &rc->reservations, rc_node) {
-		if (rsv->owner == uwb_dev
-		    || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev))
-			uwb_rsv_remove(rsv);
-	}
-}
-
-
 /**
  * uwbd_evt_handle_rc_drp - handle a DRP_IE event
  * @evt: the DRP_IE event from the radio controller
@@ -381,7 +769,6 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
 	size_t ielength, bytes_left;
 	struct uwb_dev_addr src_addr;
 	struct uwb_dev *src_dev;
-	int reason;
 
 	/* Is there enough data to decode the event (and any IEs in
 	   its payload)? */
@@ -417,22 +804,8 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
 
 	mutex_lock(&rc->rsvs_mutex);
 
-	reason = uwb_rc_evt_drp_reason(drp_evt);
-
-	switch (reason) {
-	case UWB_DRP_NOTIF_DRP_IE_RCVD:
-		uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
-		break;
-	case UWB_DRP_NOTIF_CONFLICT:
-		uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev);
-		break;
-	case UWB_DRP_NOTIF_TERMINATE:
-		uwb_drp_terminate_all(rc, src_dev);
-		break;
-	default:
-		dev_warn(dev, "ignored DRP event with reason code: %d\n", reason);
-		break;
-	}
+	/* We do not distinguish between the reason codes */
+	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
 
 	mutex_unlock(&rc->rsvs_mutex);
 
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index 1cd84f927540..165aec6a8f97 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -17,20 +17,31 @@
  */
 #include <linux/kernel.h>
 #include <linux/uwb.h>
+#include <linux/random.h>
 
 #include "uwb-internal.h"
 
 static void uwb_rsv_timer(unsigned long arg);
 
 static const char *rsv_states[] = {
-	[UWB_RSV_STATE_NONE]          = "none",
-	[UWB_RSV_STATE_O_INITIATED]   = "initiated",
-	[UWB_RSV_STATE_O_PENDING]     = "pending",
-	[UWB_RSV_STATE_O_MODIFIED]    = "modified",
-	[UWB_RSV_STATE_O_ESTABLISHED] = "established",
-	[UWB_RSV_STATE_T_ACCEPTED]    = "accepted",
-	[UWB_RSV_STATE_T_DENIED]      = "denied",
-	[UWB_RSV_STATE_T_PENDING]     = "pending",
+	[UWB_RSV_STATE_NONE]                 = "none            ",
+	[UWB_RSV_STATE_O_INITIATED]          = "o initiated     ",
+	[UWB_RSV_STATE_O_PENDING]            = "o pending       ",
+	[UWB_RSV_STATE_O_MODIFIED]           = "o modified      ",
+	[UWB_RSV_STATE_O_ESTABLISHED]        = "o established   ",
+	[UWB_RSV_STATE_O_TO_BE_MOVED]        = "o to be moved   ",
+	[UWB_RSV_STATE_O_MOVE_EXPANDING]     = "o move expanding",
+	[UWB_RSV_STATE_O_MOVE_COMBINING]     = "o move combining",
+	[UWB_RSV_STATE_O_MOVE_REDUCING]      = "o move reducing ",
+	[UWB_RSV_STATE_T_ACCEPTED]           = "t accepted      ",
+	[UWB_RSV_STATE_T_CONFLICT]           = "t conflict      ",
+	[UWB_RSV_STATE_T_PENDING]            = "t pending       ",
+	[UWB_RSV_STATE_T_DENIED]             = "t denied        ",
+	[UWB_RSV_STATE_T_RESIZED]            = "t resized       ",
+	[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
+	[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
+	[UWB_RSV_STATE_T_EXPANDING_PENDING]  = "t expanding pend",
+	[UWB_RSV_STATE_T_EXPANDING_DENIED]   = "t expanding den ",
 };
 
 static const char *rsv_types[] = {
@@ -41,6 +52,31 @@ static const char *rsv_types[] = {
 	[UWB_DRP_TYPE_PCA]      = "pca",
 };
 
+bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
+{
+	static const bool has_two_drp_ies[] = {
+		[UWB_RSV_STATE_O_INITIATED]          = false,
+		[UWB_RSV_STATE_O_PENDING]            = false,
+		[UWB_RSV_STATE_O_MODIFIED]           = false,
+		[UWB_RSV_STATE_O_ESTABLISHED]        = false,
+		[UWB_RSV_STATE_O_TO_BE_MOVED]        = false,
+		[UWB_RSV_STATE_O_MOVE_COMBINING]     = false,
+		[UWB_RSV_STATE_O_MOVE_REDUCING]      = false,
+		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = true,
+		[UWB_RSV_STATE_T_ACCEPTED]           = false,
+		[UWB_RSV_STATE_T_CONFLICT]           = false,
+		[UWB_RSV_STATE_T_PENDING]            = false,
+		[UWB_RSV_STATE_T_DENIED]             = false,
+		[UWB_RSV_STATE_T_RESIZED]            = false,
+		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true,
+		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true,
+		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = true,
+		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = true,
+	};
+
+	return has_two_drp_ies[rsv->state];
+}
+
 /**
  * uwb_rsv_state_str - return a string for a reservation state
  * @state: the reservation state.
@@ -65,7 +101,7 @@ const char *uwb_rsv_type_str(enum uwb_drp_type type)
 }
 EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
 
-static void uwb_rsv_dump(struct uwb_rsv *rsv)
+void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
 {
 	struct device *dev = &rsv->rc->uwb_dev.dev;
 	struct uwb_dev_addr devaddr;
@@ -88,12 +124,12 @@ static void uwb_rsv_release(struct kref *kref)
 	kfree(rsv);
 }
 
-static void uwb_rsv_get(struct uwb_rsv *rsv)
+void uwb_rsv_get(struct uwb_rsv *rsv)
 {
 	kref_get(&rsv->kref);
 }
 
-static void uwb_rsv_put(struct uwb_rsv *rsv)
+void uwb_rsv_put(struct uwb_rsv *rsv)
 {
 	kref_put(&rsv->kref, uwb_rsv_release);
 }
@@ -108,6 +144,7 @@ static void uwb_rsv_put(struct uwb_rsv *rsv)
 static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
 {
 	struct uwb_rc *rc = rsv->rc;
+	struct device *dev = &rc->uwb_dev.dev;
 	unsigned long *streams_bm;
 	int stream;
 
@@ -129,12 +166,15 @@ static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
 	rsv->stream = stream;
 	set_bit(stream, streams_bm);
 
+	dev_dbg(dev, "get stream %d\n", rsv->stream);
+
 	return 0;
 }
 
 static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
 {
 	struct uwb_rc *rc = rsv->rc;
+	struct device *dev = &rc->uwb_dev.dev;
 	unsigned long *streams_bm;
 
 	switch (rsv->target.type) {
@@ -149,86 +189,52 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
149 } 189 }
150 190
151 clear_bit(rsv->stream, streams_bm); 191 clear_bit(rsv->stream, streams_bm);
192
193 dev_dbg(dev, "put stream %d\n", rsv->stream);
152} 194}
153 195
154/* 196void uwb_rsv_backoff_win_timer(unsigned long arg)
155 * Generate a MAS allocation with a single row component.
156 */
157static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas,
158 int first_mas, int mas_per_zone,
159 int zs, int ze)
160{ 197{
161 struct uwb_mas_bm col; 198 struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
162 int z; 199 struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
200 struct device *dev = &rc->uwb_dev.dev;
163 201
164 bitmap_zero(mas->bm, UWB_NUM_MAS); 202 bow->can_reserve_extra_mases = true;
165 bitmap_zero(col.bm, UWB_NUM_MAS); 203 if (bow->total_expired <= 4) {
166 bitmap_fill(col.bm, mas_per_zone); 204 bow->total_expired++;
167 bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); 205 } else {
168 206 /* after 4 backoff windows have expired we can exit from
169 for (z = zs; z <= ze; z++) { 207 * the backoff procedure */
170 bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); 208 bow->total_expired = 0;
171 bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); 209 bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
172 } 210 }
211 dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n", bow->total_expired, bow->n);
212
213 /* try to relocate all the "to be moved" relocations */
214 uwb_rsv_handle_drp_avail_change(rc);
173} 215}
174 216
175/* 217void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
176 * Allocate some MAS for this reservation based on current local
177 * availability, the reservation parameters (max_mas, min_mas,
178 * sparsity), and the WiMedia rules for MAS allocations.
179 *
180 * Returns -EBUSY if insufficient free MAS are available.
181 *
182 * FIXME: to simplify this, only safe reservations with a single row
183 * component in zones 1 to 15 are tried (zone 0 is skipped to avoid
184 * problems with the MAS reserved for the BP).
185 *
186 * [ECMA-368] section B.2.
187 */
188static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv)
189{ 218{
190 static const int safe_mas_in_row[UWB_NUM_ZONES] = { 219 struct uwb_drp_backoff_win *bow = &rc->bow;
191 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 220 struct device *dev = &rc->uwb_dev.dev;
192 }; 221 unsigned timeout_us;
193 int n, r;
194 struct uwb_mas_bm mas;
195 bool found = false;
196 222
197 /* 223 dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);
198 * Search all valid safe allocations until either: too few MAS
199 * are available; or the smallest allocation with sufficient
200 * MAS is found.
201 *
202 * The top of the zones are preferred, so space for larger
203 * allocations is available in the bottom of the zone (e.g., a
204 * 15 MAS allocation should start in row 14 leaving space for
205 * a 120 MAS allocation at row 0).
206 */
207 for (n = safe_mas_in_row[0]; n >= 1; n--) {
208 int num_mas;
209 224
210 num_mas = n * (UWB_NUM_ZONES - 1); 225 bow->can_reserve_extra_mases = false;
211 if (num_mas < rsv->min_mas)
212 break;
213 if (found && num_mas < rsv->max_mas)
214 break;
215 226
216 for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { 227 if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
217 if (safe_mas_in_row[r] < n) 228 return;
218 continue;
219 uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES);
220 if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) {
221 found = true;
222 break;
223 }
224 }
225 }
226 229
227 if (!found) 230 bow->window <<= 1;
228 return -EBUSY; 231 bow->n = random32() & (bow->window - 1);
232 dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n);
229 233
230 bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); 234 /* reset the timer associated variables */
231 return 0; 235 timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
236 bow->total_expired = 0;
237 mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
232} 238}
233 239
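
The two functions above implement the DRP backoff procedure: on a conflict the window
doubles and a random superframe slot n is drawn; after enough quiet expirations the
window resets and extra MAS may be reserved again. The same arithmetic as a compilable
userspace sketch (WIN_MIN/WIN_MAX are assumed power-of-two stand-ins for the
UWB_DRP_BACKOFF_WIN_* constants, rand() replaces random32(), and the timer is driven by
hand):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define WIN_MIN 16
#define WIN_MAX 256

struct backoff_win {
        int window;                     /* current window, in superframes */
        int n;                          /* random slot picked inside the window */
        int total_expired;              /* consecutive quiet expirations */
        bool can_reserve_extra_mases;
};

static void bow_init(struct backoff_win *bow)
{
        bow->window = WIN_MIN >> 1;
        bow->n = 0;
        bow->total_expired = 0;
        bow->can_reserve_extra_mases = true;
}

/* mirrors uwb_rsv_backoff_win_increment(): double the window (up to the
 * max) and pick a uniform random slot in [0, window) */
static void bow_increment(struct backoff_win *bow)
{
        bow->can_reserve_extra_mases = false;
        if ((bow->window << 1) == WIN_MAX)
                return;
        bow->window <<= 1;
        bow->n = rand() & (bow->window - 1);
        bow->total_expired = 0;
}

/* mirrors uwb_rsv_backoff_win_timer(): after four expirations the
 * procedure ends and the window drops back to its initial value */
static void bow_timer_expired(struct backoff_win *bow)
{
        bow->can_reserve_extra_mases = true;
        if (bow->total_expired <= 4) {
                bow->total_expired++;
        } else {
                bow->total_expired = 0;
                bow->window = WIN_MIN >> 1;
        }
}

int main(void)
{
        struct backoff_win bow;
        int i;

        bow_init(&bow);
        for (i = 0; i < 6; i++) {
                bow_increment(&bow);
                printf("window=%d n=%d\n", bow.window, bow.n);
        }
        bow_timer_expired(&bow);
        printf("can_reserve_extra_mases=%d\n", bow.can_reserve_extra_mases);
        return 0;
}
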
234static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) 240static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
@@ -241,13 +247,16 @@ static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
241 * received. 247 * received.
242 */ 248 */
243 if (rsv->is_multicast) { 249 if (rsv->is_multicast) {
244 if (rsv->state == UWB_RSV_STATE_O_INITIATED) 250 if (rsv->state == UWB_RSV_STATE_O_INITIATED
251 || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
252 || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
253 || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
245 sframes = 1; 254 sframes = 1;
246 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) 255 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
247 sframes = 0; 256 sframes = 0;
257
248 } 258 }
249 259
250 rsv->expired = false;
251 if (sframes > 0) { 260 if (sframes > 0) {
252 /* 261 /*
253 * Add an additional 2 superframes to account for the 262 * Add an additional 2 superframes to account for the
@@ -269,7 +278,7 @@ static void uwb_rsv_state_update(struct uwb_rsv *rsv,
269 rsv->state = new_state; 278 rsv->state = new_state;
270 rsv->ie_valid = false; 279 rsv->ie_valid = false;
271 280
272 uwb_rsv_dump(rsv); 281 uwb_rsv_dump("SU", rsv);
273 282
274 uwb_rsv_stroke_timer(rsv); 283 uwb_rsv_stroke_timer(rsv);
275 uwb_rsv_sched_update(rsv->rc); 284 uwb_rsv_sched_update(rsv->rc);
@@ -283,10 +292,17 @@ static void uwb_rsv_callback(struct uwb_rsv *rsv)
283 292
284void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) 293void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
285{ 294{
295 struct uwb_rsv_move *mv = &rsv->mv;
296
286 if (rsv->state == new_state) { 297 if (rsv->state == new_state) {
287 switch (rsv->state) { 298 switch (rsv->state) {
288 case UWB_RSV_STATE_O_ESTABLISHED: 299 case UWB_RSV_STATE_O_ESTABLISHED:
300 case UWB_RSV_STATE_O_MOVE_EXPANDING:
301 case UWB_RSV_STATE_O_MOVE_COMBINING:
302 case UWB_RSV_STATE_O_MOVE_REDUCING:
289 case UWB_RSV_STATE_T_ACCEPTED: 303 case UWB_RSV_STATE_T_ACCEPTED:
304 case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
305 case UWB_RSV_STATE_T_RESIZED:
290 case UWB_RSV_STATE_NONE: 306 case UWB_RSV_STATE_NONE:
291 uwb_rsv_stroke_timer(rsv); 307 uwb_rsv_stroke_timer(rsv);
292 break; 308 break;
@@ -298,11 +314,10 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
298 return; 314 return;
299 } 315 }
300 316
317 uwb_rsv_dump("SC", rsv);
318
301 switch (new_state) { 319 switch (new_state) {
302 case UWB_RSV_STATE_NONE: 320 case UWB_RSV_STATE_NONE:
303 uwb_drp_avail_release(rsv->rc, &rsv->mas);
304 if (uwb_rsv_is_owner(rsv))
305 uwb_rsv_put_stream(rsv);
306 uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); 321 uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
307 uwb_rsv_callback(rsv); 322 uwb_rsv_callback(rsv);
308 break; 323 break;
@@ -312,12 +327,45 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
312 case UWB_RSV_STATE_O_PENDING: 327 case UWB_RSV_STATE_O_PENDING:
313 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); 328 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
314 break; 329 break;
330 case UWB_RSV_STATE_O_MODIFIED:
331 /* the companion bitmap holds the MAS to drop */
332 bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
333 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
334 break;
315 case UWB_RSV_STATE_O_ESTABLISHED: 335 case UWB_RSV_STATE_O_ESTABLISHED:
336 if (rsv->state == UWB_RSV_STATE_O_MODIFIED
337 || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
338 uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
339 rsv->needs_release_companion_mas = false;
340 }
316 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 341 uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
317 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); 342 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
318 uwb_rsv_callback(rsv); 343 uwb_rsv_callback(rsv);
319 break; 344 break;
345 case UWB_RSV_STATE_O_MOVE_EXPANDING:
346 rsv->needs_release_companion_mas = true;
347 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
348 break;
349 case UWB_RSV_STATE_O_MOVE_COMBINING:
350 rsv->needs_release_companion_mas = false;
351 uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
352 bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
353 rsv->mas.safe += mv->companion_mas.safe;
354 rsv->mas.unsafe += mv->companion_mas.unsafe;
355 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
356 break;
357 case UWB_RSV_STATE_O_MOVE_REDUCING:
358 bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
359 rsv->needs_release_companion_mas = true;
360 rsv->mas.safe = mv->final_mas.safe;
361 rsv->mas.unsafe = mv->final_mas.unsafe;
362 bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
363 bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
364 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
365 break;
320 case UWB_RSV_STATE_T_ACCEPTED: 366 case UWB_RSV_STATE_T_ACCEPTED:
367 case UWB_RSV_STATE_T_RESIZED:
368 rsv->needs_release_companion_mas = false;
321 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 369 uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
322 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); 370 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
323 uwb_rsv_callback(rsv); 371 uwb_rsv_callback(rsv);
@@ -325,12 +373,82 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
325 case UWB_RSV_STATE_T_DENIED: 373 case UWB_RSV_STATE_T_DENIED:
326 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); 374 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
327 break; 375 break;
376 case UWB_RSV_STATE_T_CONFLICT:
377 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
378 break;
379 case UWB_RSV_STATE_T_PENDING:
380 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
381 break;
382 case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
383 rsv->needs_release_companion_mas = true;
384 uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
385 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
386 break;
328 default: 387 default:
329 dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", 388 dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
330 uwb_rsv_state_str(new_state), new_state); 389 uwb_rsv_state_str(new_state), new_state);
331 } 390 }
332} 391}
333 392
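
The move states above juggle two MAS sets: the reservation's current set and a
companion set that is first claimed (expanding/combining) and later dropped
(reducing). A userspace sketch of the bitmap arithmetic, with four-word arrays and
tiny stand-ins for the kernel's bitmap_or()/bitmap_andnot():

#include <stdint.h>
#include <stdio.h>

#define N_WORDS 4                       /* 4 x 64 = 256 MAS per superframe */

/* dst = a | b */
static void bm_or(uint64_t *dst, const uint64_t *a, const uint64_t *b)
{
        for (int i = 0; i < N_WORDS; i++)
                dst[i] = a[i] | b[i];
}

/* dst = a & ~b */
static void bm_andnot(uint64_t *dst, const uint64_t *a, const uint64_t *b)
{
        for (int i = 0; i < N_WORDS; i++)
                dst[i] = a[i] & ~b[i];
}

int main(void)
{
        uint64_t cur[N_WORDS]   = { 0x0f };     /* currently held MAS */
        uint64_t final[N_WORDS] = { 0xf0 };     /* target MAS after the move */
        uint64_t companion[N_WORDS];

        /* MOVE_EXPANDING/COMBINING: companion = MAS to add, then merge */
        bm_andnot(companion, final, cur);
        bm_or(cur, cur, companion);

        /* MOVE_REDUCING: companion = MAS to drop once the move completes */
        bm_andnot(companion, cur, final);

        printf("held=%#llx to-drop=%#llx\n",
               (unsigned long long)cur[0], (unsigned long long)companion[0]);
        return 0;
}
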
393static void uwb_rsv_handle_timeout_work(struct work_struct *work)
394{
395 struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
396 handle_timeout_work);
397 struct uwb_rc *rc = rsv->rc;
398
399 mutex_lock(&rc->rsvs_mutex);
400
401 uwb_rsv_dump("TO", rsv);
402
403 switch (rsv->state) {
404 case UWB_RSV_STATE_O_INITIATED:
405 if (rsv->is_multicast) {
406 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
407 goto unlock;
408 }
409 break;
410 case UWB_RSV_STATE_O_MOVE_EXPANDING:
411 if (rsv->is_multicast) {
412 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
413 goto unlock;
414 }
415 break;
416 case UWB_RSV_STATE_O_MOVE_COMBINING:
417 if (rsv->is_multicast) {
418 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
419 goto unlock;
420 }
421 break;
422 case UWB_RSV_STATE_O_MOVE_REDUCING:
423 if (rsv->is_multicast) {
424 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
425 goto unlock;
426 }
427 break;
428 case UWB_RSV_STATE_O_ESTABLISHED:
429 if (rsv->is_multicast)
430 goto unlock;
431 break;
432 case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
433 /*
434 * The time out could be for the main or the
435 * companion DRP; assume it's for the companion and
436 * drop that first. A further time out is required to
437 * drop the main.
438 */
439 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
440 uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
441 goto unlock;
442 default:
443 break;
444 }
445
446 uwb_rsv_remove(rsv);
447
448unlock:
449 mutex_unlock(&rc->rsvs_mutex);
450}
451
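
Multicast reservations get no per-target responses, so the stroke timeout is what
drives an owner through a move: each expiry advances one state, as handled above. A
standalone sketch of that progression, with the enum reduced to the states involved:

#include <stdio.h>

/* reduced stand-in for enum uwb_rsv_state */
enum state {
        INITIATED,
        MOVE_EXPANDING,
        MOVE_COMBINING,
        MOVE_REDUCING,
        ESTABLISHED,
};

static const char *names[] = {
        "initiated", "move-expanding", "move-combining",
        "move-reducing", "established",
};

/* each stroke timeout advances a multicast owner one step */
static enum state on_timeout(enum state s)
{
        switch (s) {
        case INITIATED:      return ESTABLISHED;
        case MOVE_EXPANDING: return MOVE_COMBINING;
        case MOVE_COMBINING: return MOVE_REDUCING;
        case MOVE_REDUCING:  return ESTABLISHED;
        default:             return s;
        }
}

int main(void)
{
        enum state s = MOVE_EXPANDING;
        int i;

        for (i = 0; i < 3; i++) {
                s = on_timeout(s);
                printf("-> %s\n", names[s]);
        }
        return 0;
}
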
334static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) 452static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
335{ 453{
336 struct uwb_rsv *rsv; 454 struct uwb_rsv *rsv;
@@ -347,6 +465,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
347 rsv->timer.data = (unsigned long)rsv; 465 rsv->timer.data = (unsigned long)rsv;
348 466
349 rsv->rc = rc; 467 rsv->rc = rc;
468 INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
350 469
351 return rsv; 470 return rsv;
352} 471}
@@ -381,8 +500,18 @@ EXPORT_SYMBOL_GPL(uwb_rsv_create);
381 500
382void uwb_rsv_remove(struct uwb_rsv *rsv) 501void uwb_rsv_remove(struct uwb_rsv *rsv)
383{ 502{
503 uwb_rsv_dump("RM", rsv);
504
384 if (rsv->state != UWB_RSV_STATE_NONE) 505 if (rsv->state != UWB_RSV_STATE_NONE)
385 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 506 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
507
508 if (rsv->needs_release_companion_mas)
509 uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
510 uwb_drp_avail_release(rsv->rc, &rsv->mas);
511
512 if (uwb_rsv_is_owner(rsv))
513 uwb_rsv_put_stream(rsv);
514
386 del_timer_sync(&rsv->timer); 515 del_timer_sync(&rsv->timer);
387 uwb_dev_put(rsv->owner); 516 uwb_dev_put(rsv->owner);
388 if (rsv->target.type == UWB_RSV_TARGET_DEV) 517 if (rsv->target.type == UWB_RSV_TARGET_DEV)
@@ -409,7 +538,7 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
409 * @rsv: the reservation 538 * @rsv: the reservation
410 * 539 *
411 * The PAL should fill in @rsv's owner, target, type, max_mas, 540 * The PAL should fill in @rsv's owner, target, type, max_mas,
412 * min_mas, sparsity and is_multicast fields. If the target is a 541 * min_mas, max_interval and is_multicast fields. If the target is a
413 * uwb_dev it must be referenced. 542 * uwb_dev it must be referenced.
414 * 543 *
415 * The reservation's callback will be called when the reservation is 544 * The reservation's callback will be called when the reservation is
@@ -418,16 +547,27 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
418int uwb_rsv_establish(struct uwb_rsv *rsv) 547int uwb_rsv_establish(struct uwb_rsv *rsv)
419{ 548{
420 struct uwb_rc *rc = rsv->rc; 549 struct uwb_rc *rc = rsv->rc;
550 struct uwb_mas_bm available;
421 int ret; 551 int ret;
422 552
423 mutex_lock(&rc->rsvs_mutex); 553 mutex_lock(&rc->rsvs_mutex);
424
425 ret = uwb_rsv_get_stream(rsv); 554 ret = uwb_rsv_get_stream(rsv);
426 if (ret) 555 if (ret)
427 goto out; 556 goto out;
428 557
429 ret = uwb_rsv_alloc_mas(rsv); 558 rsv->tiebreaker = random32() & 1;
430 if (ret) { 559 /* get available mas bitmap */
560 uwb_drp_available(rc, &available);
561
562 ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
563 if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
564 ret = -EBUSY;
565 uwb_rsv_put_stream(rsv);
566 goto out;
567 }
568
569 ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
570 if (ret != 0) {
431 uwb_rsv_put_stream(rsv); 571 uwb_rsv_put_stream(rsv);
432 goto out; 572 goto out;
433 } 573 }
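
The reworked establish path acquires a stream, draws a random DRP conflict
tiebreaker, asks the allocator for a MAS set, and finally marks that set pending,
releasing the stream whenever a later step fails. The shape of that unwind as a
standalone sketch (the four helpers are hypothetical stand-ins that only report
success or failure):

#include <stdio.h>

static int get_stream(void)      { return 0; }
static void put_stream(void)     { puts("put stream"); }
static int find_allocation(void) { return 0; }
static int reserve_pending(void) { return -1; }  /* force a failure */

static int establish(void)
{
        int ret;

        ret = get_stream();
        if (ret)
                goto out;

        ret = find_allocation();
        if (ret) {
                put_stream();           /* undo the earlier acquisition */
                goto out;
        }

        ret = reserve_pending();
        if (ret) {
                put_stream();
                goto out;
        }
out:
        return ret;
}

int main(void)
{
        printf("establish() = %d\n", establish());
        return 0;
}
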
@@ -448,16 +588,71 @@ EXPORT_SYMBOL_GPL(uwb_rsv_establish);
448 * @rsv: the reservation to modify 588 * @rsv: the reservation to modify
449 * @max_mas: new maximum MAS to reserve 589 * @max_mas: new maximum MAS to reserve
450 * @min_mas: new minimum MAS to reserve 590 * @min_mas: new minimum MAS to reserve
451 * @sparsity: new sparsity to use 591 * @max_interval: new max_interval to use
452 * 592 *
453 * FIXME: implement this once there are PALs that use it. 593 * FIXME: implement this once there are PALs that use it.
454 */ 594 */
455int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) 595int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
456{ 596{
457 return -ENOSYS; 597 return -ENOSYS;
458} 598}
459EXPORT_SYMBOL_GPL(uwb_rsv_modify); 599EXPORT_SYMBOL_GPL(uwb_rsv_modify);
460 600
601/*
602 * move an already established reservation (rc->rsvs_mutex must be
603 * held when this function is called)
604 */
605int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
606{
607 struct uwb_rc *rc = rsv->rc;
608 struct uwb_drp_backoff_win *bow = &rc->bow;
609 struct device *dev = &rc->uwb_dev.dev;
610 struct uwb_rsv_move *mv;
611 int ret = 0;
612
613 if (!bow->can_reserve_extra_mases)
614 return -EBUSY;
615
616 mv = &rsv->mv;
617
618 if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {
619
620 if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
621 /* We want to move the reservation */
622 bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
623 uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
624 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
625 }
626 } else {
627 dev_dbg(dev, "new allocation not found\n");
628 }
629
630 return ret;
631}
632
633/* Try to move every reservation in the O_ESTABLISHED state, giving the
634 * MAS allocator an availability that is the real availability plus the
635 * MAS already allocated to the reservation. */
636void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
637{
638 struct uwb_drp_backoff_win *bow = &rc->bow;
639 struct uwb_rsv *rsv;
640 struct uwb_mas_bm mas;
641
642 if (!bow->can_reserve_extra_mases)
643 return;
644
645 list_for_each_entry(rsv, &rc->reservations, rc_node) {
646 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
647 rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
648 uwb_drp_available(rc, &mas);
649 bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
650 uwb_rsv_try_move(rsv, &mas);
651 }
652 }
653
654}
655
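
uwb_rsv_handle_drp_avail_change() offers each established reservation the real
availability plus the MAS it already holds, and only moves it if the allocator
returns a different set. A one-word (64-MAS) model of that decision, with a trivial
stand-in allocation policy:

#include <stdint.h>
#include <stdio.h>

/* stand-in allocator: grab the lowest 4 available "MAS" bits */
static uint64_t best_alloc(uint64_t avail)
{
        uint64_t got = 0;
        int n = 0;

        for (int i = 0; i < 64 && n < 4; i++)
                if (avail & (1ull << i)) {
                        got |= 1ull << i;
                        n++;
                }
        return got;
}

int main(void)
{
        uint64_t global_avail = 0x0ffull;       /* free MAS in the superframe */
        uint64_t rsv_mas      = 0xf00ull;       /* this reservation's MAS */

        /* offer the allocator our own MAS as if they were free */
        uint64_t offered = global_avail | rsv_mas;
        uint64_t final = best_alloc(offered);

        if (final != rsv_mas)
                printf("move: drop %#llx, add %#llx\n",
                       (unsigned long long)(rsv_mas & ~final),
                       (unsigned long long)(final & ~rsv_mas));
        return 0;
}
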
461/** 656/**
462 * uwb_rsv_terminate - terminate an established reservation 657 * uwb_rsv_terminate - terminate an established reservation
463 * @rsv: the reservation to terminate 658 * @rsv: the reservation to terminate
@@ -546,6 +741,7 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
546 uwb_dev_get(rsv->owner); 741 uwb_dev_get(rsv->owner);
547 rsv->target.type = UWB_RSV_TARGET_DEV; 742 rsv->target.type = UWB_RSV_TARGET_DEV;
548 rsv->target.dev = &rc->uwb_dev; 743 rsv->target.dev = &rc->uwb_dev;
744 uwb_dev_get(&rc->uwb_dev);
549 rsv->type = uwb_ie_drp_type(drp_ie); 745 rsv->type = uwb_ie_drp_type(drp_ie);
550 rsv->stream = uwb_ie_drp_stream_index(drp_ie); 746 rsv->stream = uwb_ie_drp_stream_index(drp_ie);
551 uwb_drp_ie_to_bm(&rsv->mas, drp_ie); 747 uwb_drp_ie_to_bm(&rsv->mas, drp_ie);
@@ -567,12 +763,34 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
567 list_add_tail(&rsv->rc_node, &rc->reservations); 763 list_add_tail(&rsv->rc_node, &rc->reservations);
568 state = rsv->state; 764 state = rsv->state;
569 rsv->state = UWB_RSV_STATE_NONE; 765 rsv->state = UWB_RSV_STATE_NONE;
570 uwb_rsv_set_state(rsv, state); 766
767 /* FIXME: do something sensible here */
768 if (state == UWB_RSV_STATE_T_ACCEPTED
769 && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
770 /* FIXME: do something sensible here */
771 } else {
772 uwb_rsv_set_state(rsv, state);
773 }
571 774
572 return rsv; 775 return rsv;
573} 776}
574 777
575/** 778/**
779 * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation
780 * @rsv: the reservation.
781 * @mas: returns the available MAS.
782 *
783 * The usable MAS of a reservation may be less than the negotiated MAS
784 * if alien BPs are present.
785 */
786void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
787{
788 bitmap_zero(mas->bm, UWB_NUM_MAS);
789 bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
790}
791EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);
792
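
A one-word (64-MAS) model of the AND-NOT above: MAS claimed by a conflicting alien BP
are masked out of the negotiated set before a PAL uses them; the values are arbitrary
examples.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t rsv_mas = 0x0ff0;      /* negotiated MAS */
        uint64_t alien   = 0x0030;      /* MAS conflicting with an alien BP */

        /* usable = negotiated & ~conflicting */
        printf("usable = %#llx\n",
               (unsigned long long)(rsv_mas & ~alien));
        return 0;
}
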
793/**
576 * uwb_rsv_find - find a reservation for a received DRP IE. 794 * uwb_rsv_find - find a reservation for a received DRP IE.
577 * @rc: the radio controller 795 * @rc: the radio controller
578 * @src: source of the DRP IE 796 * @src: source of the DRP IE
@@ -611,8 +829,6 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc)
611 bool ie_updated = false; 829 bool ie_updated = false;
612 830
613 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 831 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
614 if (rsv->expired)
615 uwb_drp_handle_timeout(rsv);
616 if (!rsv->ie_valid) { 832 if (!rsv->ie_valid) {
617 uwb_drp_ie_update(rsv); 833 uwb_drp_ie_update(rsv);
618 ie_updated = true; 834 ie_updated = true;
@@ -622,9 +838,47 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc)
622 return ie_updated; 838 return ie_updated;
623} 839}
624 840
841void uwb_rsv_queue_update(struct uwb_rc *rc)
842{
843 unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
844
845 queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us));
846}
847
848/**
849 * uwb_rsv_sched_update - schedule an update of the DRP IEs
850 * @rc: the radio controller.
851 *
852 * To improve performance and ensure correctness with [ECMA-368], the
853 * number of SET-DRP-IE commands issued is limited.
854 *
855 * DRP IE updates come from two sources: DRP events from the hardware,
856 * which all occur at the beginning of the superframe ('synchronous'
857 * events) and reservation establishment/termination requests from
858 * PALs or timers ('asynchronous' events).
859 *
860 * A delayed work ensures that all the synchronous events result in
861 * one SET-DRP-IE command.
862 *
863 * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
864 * flags) will prevent an asynchronous event from starting a SET-DRP-IE
865 * command if one is currently awaiting a response.
866 *
867 * FIXME: this does leave a window where an asynchronous event can delay
868 * the SET-DRP-IE for a synchronous event by one superframe.
869 */
625void uwb_rsv_sched_update(struct uwb_rc *rc) 870void uwb_rsv_sched_update(struct uwb_rc *rc)
626{ 871{
627 queue_work(rc->rsv_workq, &rc->rsv_update_work); 872 spin_lock(&rc->rsvs_lock);
873 if (!delayed_work_pending(&rc->rsv_update_work)) {
874 if (rc->set_drp_ie_pending > 0) {
875 rc->set_drp_ie_pending++;
876 goto unlock;
877 }
878 uwb_rsv_queue_update(rc);
879 }
880unlock:
881 spin_unlock(&rc->rsvs_lock);
628} 882}
629 883
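
The lock and counter above batch DRP IE changes so that many state updates produce
few SET-DRP-IE commands: while one is in flight, later requests only bump a pending
count, and the reply triggers a single follow-up. A single-threaded model of that
coalescing (the reply-side handling lives in drp.c and is modelled loosely here;
locking is elided):

#include <stdbool.h>
#include <stdio.h>

static int set_drp_ie_pending;  /* in flight (1) plus postponed requests */
static bool work_queued;

static void queue_update(void)
{
        work_queued = true;
}

/* mirrors uwb_rsv_sched_update(): postpone if a command is in flight */
static void sched_update(void)
{
        if (work_queued)
                return;
        if (set_drp_ie_pending > 0) {
                set_drp_ie_pending++;
                return;
        }
        queue_update();
}

/* the delayed work: one SET-DRP-IE covers all coalesced changes */
static void update_work(void)
{
        work_queued = false;
        set_drp_ie_pending = 1;
        printf("SET-DRP-IE sent\n");
}

/* reply path: if requests were postponed, queue one more update */
static void cmd_done(void)
{
        bool postponed = set_drp_ie_pending > 1;

        set_drp_ie_pending = 0;
        if (postponed)
                queue_update();
}

int main(void)
{
        sched_update();         /* first request: work queued */
        update_work();          /* one command goes out */
        sched_update();         /* postponed: a command is in flight */
        sched_update();         /* coalesced with the postponed one */
        cmd_done();             /* reply: a single follow-up is queued */
        update_work();          /* second command covers both requests */
        cmd_done();
        return 0;
}
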
630/* 884/*
@@ -633,7 +887,8 @@ void uwb_rsv_sched_update(struct uwb_rc *rc)
633 */ 887 */
634static void uwb_rsv_update_work(struct work_struct *work) 888static void uwb_rsv_update_work(struct work_struct *work)
635{ 889{
636 struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); 890 struct uwb_rc *rc = container_of(work, struct uwb_rc,
891 rsv_update_work.work);
637 bool ie_updated; 892 bool ie_updated;
638 893
639 mutex_lock(&rc->rsvs_mutex); 894 mutex_lock(&rc->rsvs_mutex);
@@ -645,18 +900,34 @@ static void uwb_rsv_update_work(struct work_struct *work)
645 ie_updated = true; 900 ie_updated = true;
646 } 901 }
647 902
648 if (ie_updated) 903 if (ie_updated && (rc->set_drp_ie_pending == 0))
649 uwb_rc_send_all_drp_ie(rc); 904 uwb_rc_send_all_drp_ie(rc);
650 905
651 mutex_unlock(&rc->rsvs_mutex); 906 mutex_unlock(&rc->rsvs_mutex);
652} 907}
653 908
909static void uwb_rsv_alien_bp_work(struct work_struct *work)
910{
911 struct uwb_rc *rc = container_of(work, struct uwb_rc,
912 rsv_alien_bp_work.work);
913 struct uwb_rsv *rsv;
914
915 mutex_lock(&rc->rsvs_mutex);
916
917 list_for_each_entry(rsv, &rc->reservations, rc_node) {
918 if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
919 rsv->callback(rsv);
920 }
921 }
922
923 mutex_unlock(&rc->rsvs_mutex);
924}
925
654static void uwb_rsv_timer(unsigned long arg) 926static void uwb_rsv_timer(unsigned long arg)
655{ 927{
656 struct uwb_rsv *rsv = (struct uwb_rsv *)arg; 928 struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
657 929
658 rsv->expired = true; 930 queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
659 uwb_rsv_sched_update(rsv->rc);
660} 931}
661 932
662/** 933/**
@@ -673,16 +944,27 @@ void uwb_rsv_remove_all(struct uwb_rc *rc)
673 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 944 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
674 uwb_rsv_remove(rsv); 945 uwb_rsv_remove(rsv);
675 } 946 }
947 /* Cancel any postponed update. */
948 rc->set_drp_ie_pending = 0;
676 mutex_unlock(&rc->rsvs_mutex); 949 mutex_unlock(&rc->rsvs_mutex);
677 950
678 cancel_work_sync(&rc->rsv_update_work); 951 cancel_delayed_work_sync(&rc->rsv_update_work);
679} 952}
680 953
681void uwb_rsv_init(struct uwb_rc *rc) 954void uwb_rsv_init(struct uwb_rc *rc)
682{ 955{
683 INIT_LIST_HEAD(&rc->reservations); 956 INIT_LIST_HEAD(&rc->reservations);
957 INIT_LIST_HEAD(&rc->cnflt_alien_list);
684 mutex_init(&rc->rsvs_mutex); 958 mutex_init(&rc->rsvs_mutex);
685 INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); 959 spin_lock_init(&rc->rsvs_lock);
960 INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
961 INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
962 rc->bow.can_reserve_extra_mases = true;
963 rc->bow.total_expired = 0;
964 rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
965 init_timer(&rc->bow.timer);
966 rc->bow.timer.function = uwb_rsv_backoff_win_timer;
967 rc->bow.timer.data = (unsigned long)&rc->bow;
686 968
687 bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); 969 bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
688} 970}
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
index a6debb9baf38..89b2e6a7214c 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/uwb/uwb-debug.c
@@ -82,29 +82,21 @@ struct uwb_dbg {
82 struct dentry *reservations_f; 82 struct dentry *reservations_f;
83 struct dentry *accept_f; 83 struct dentry *accept_f;
84 struct dentry *drp_avail_f; 84 struct dentry *drp_avail_f;
85 spinlock_t list_lock;
85}; 86};
86 87
87static struct dentry *root_dir; 88static struct dentry *root_dir;
88 89
89static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) 90static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
90{ 91{
91 struct uwb_rc *rc = rsv->rc; 92 struct uwb_dbg *dbg = rsv->pal_priv;
92 struct device *dev = &rc->uwb_dev.dev;
93 struct uwb_dev_addr devaddr;
94 char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
95
96 uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
97 if (rsv->target.type == UWB_RSV_TARGET_DEV)
98 devaddr = rsv->target.dev->dev_addr;
99 else
100 devaddr = rsv->target.devaddr;
101 uwb_dev_addr_print(target, sizeof(target), &devaddr);
102 93
103 dev_dbg(dev, "debug: rsv %s -> %s: %s\n", 94 uwb_rsv_dump("debug", rsv);
104 owner, target, uwb_rsv_state_str(rsv->state));
105 95
106 if (rsv->state == UWB_RSV_STATE_NONE) { 96 if (rsv->state == UWB_RSV_STATE_NONE) {
97 spin_lock(&dbg->list_lock);
107 list_del(&rsv->pal_node); 98 list_del(&rsv->pal_node);
99 spin_unlock(&dbg->list_lock);
108 uwb_rsv_destroy(rsv); 100 uwb_rsv_destroy(rsv);
109 } 101 }
110} 102}
@@ -128,20 +120,21 @@ static int cmd_rsv_establish(struct uwb_rc *rc,
128 return -ENOMEM; 120 return -ENOMEM;
129 } 121 }
130 122
131 rsv->owner = &rc->uwb_dev; 123 rsv->target.type = UWB_RSV_TARGET_DEV;
132 rsv->target.type = UWB_RSV_TARGET_DEV; 124 rsv->target.dev = target;
133 rsv->target.dev = target; 125 rsv->type = cmd->type;
134 rsv->type = cmd->type; 126 rsv->max_mas = cmd->max_mas;
135 rsv->max_mas = cmd->max_mas; 127 rsv->min_mas = cmd->min_mas;
136 rsv->min_mas = cmd->min_mas; 128 rsv->max_interval = cmd->max_interval;
137 rsv->sparsity = cmd->sparsity;
138 129
139 ret = uwb_rsv_establish(rsv); 130 ret = uwb_rsv_establish(rsv);
140 if (ret) 131 if (ret)
141 uwb_rsv_destroy(rsv); 132 uwb_rsv_destroy(rsv);
142 else 133 else {
134 spin_lock(&rc->dbg->list_lock);
143 list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); 135 list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);
144 136 spin_unlock(&rc->dbg->list_lock);
137 }
145 return ret; 138 return ret;
146} 139}
147 140
@@ -151,17 +144,24 @@ static int cmd_rsv_terminate(struct uwb_rc *rc,
151 struct uwb_rsv *rsv, *found = NULL; 144 struct uwb_rsv *rsv, *found = NULL;
152 int i = 0; 145 int i = 0;
153 146
147 spin_lock(&rc->dbg->list_lock);
148
154 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { 149 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
155 if (i == cmd->index) { 150 if (i == cmd->index) {
156 found = rsv; 151 found = rsv;
152 uwb_rsv_get(found);
157 break; 153 break;
158 } 154 }
159 i++; 155 i++;
160 } 156 }
157
158 spin_unlock(&rc->dbg->list_lock);
159
161 if (!found) 160 if (!found)
162 return -EINVAL; 161 return -EINVAL;
163 162
164 uwb_rsv_terminate(found); 163 uwb_rsv_terminate(found);
164 uwb_rsv_put(found);
165 165
166 return 0; 166 return 0;
167} 167}
@@ -191,7 +191,7 @@ static ssize_t command_write(struct file *file, const char __user *buf,
191 struct uwb_rc *rc = file->private_data; 191 struct uwb_rc *rc = file->private_data;
192 struct uwb_dbg_cmd cmd; 192 struct uwb_dbg_cmd cmd;
193 int ret = 0; 193 int ret = 0;
194 194
195 if (len != sizeof(struct uwb_dbg_cmd)) 195 if (len != sizeof(struct uwb_dbg_cmd))
196 return -EINVAL; 196 return -EINVAL;
197 197
@@ -325,7 +325,9 @@ static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
325 struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal); 325 struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal);
326 326
327 if (dbg->accept) { 327 if (dbg->accept) {
328 spin_lock(&dbg->list_lock);
328 list_add_tail(&rsv->pal_node, &dbg->rsvs); 329 list_add_tail(&rsv->pal_node, &dbg->rsvs);
330 spin_unlock(&dbg->list_lock);
329 uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg); 331 uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg);
330 } 332 }
331} 333}
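
The new list_lock closes a race between the debugfs command writers and the
reservation callback, which can delete entries from dbg->rsvs. A userspace sketch of
the same walk-under-spinlock pattern, using POSIX spinlocks in place of the kernel
API (node layout and names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_spinlock_t list_lock;
static struct node *rsvs;

static void add_rsv(struct node *n)
{
        pthread_spin_lock(&list_lock);
        n->next = rsvs;
        rsvs = n;
        pthread_spin_unlock(&list_lock);
}

static struct node *find_rsv(int index)
{
        struct node *n;
        int i = 0;

        pthread_spin_lock(&list_lock);
        for (n = rsvs; n; n = n->next)
                if (i++ == index)
                        break;
        pthread_spin_unlock(&list_lock);
        return n;
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };

        pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
        add_rsv(&a);
        add_rsv(&b);
        printf("found id=%d\n", find_rsv(0)->id);
        return 0;
}

Note that the driver additionally takes a reference (uwb_rsv_get()) on the entry it
found before dropping the lock, so the terminate call cannot race with the callback
freeing the reservation.
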
@@ -341,6 +343,7 @@ void uwb_dbg_add_rc(struct uwb_rc *rc)
341 return; 343 return;
342 344
343 INIT_LIST_HEAD(&rc->dbg->rsvs); 345 INIT_LIST_HEAD(&rc->dbg->rsvs);
346 spin_lock_init(&rc->dbg->list_lock);
344 347
345 uwb_pal_init(&rc->dbg->pal); 348 uwb_pal_init(&rc->dbg->pal);
346 rc->dbg->pal.rc = rc; 349 rc->dbg->pal.rc = rc;
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
index f0f21f406bf0..d5bcfc1c227a 100644
--- a/drivers/uwb/uwb-internal.h
+++ b/drivers/uwb/uwb-internal.h
@@ -92,6 +92,12 @@ extern const char *uwb_rc_strerror(unsigned code);
92 92
93struct uwb_rc_neh; 93struct uwb_rc_neh;
94 94
95extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
96 struct uwb_rccb *cmd, size_t cmd_size,
97 u8 expected_type, u16 expected_event,
98 uwb_rc_cmd_cb_f cb, void *arg);
99
100
95void uwb_rc_neh_create(struct uwb_rc *rc); 101void uwb_rc_neh_create(struct uwb_rc *rc);
96void uwb_rc_neh_destroy(struct uwb_rc *rc); 102void uwb_rc_neh_destroy(struct uwb_rc *rc);
97 103
@@ -106,7 +112,69 @@ void uwb_rc_neh_put(struct uwb_rc_neh *neh);
106extern int uwb_est_create(void); 112extern int uwb_est_create(void);
107extern void uwb_est_destroy(void); 113extern void uwb_est_destroy(void);
108 114
115/*
116 * UWB conflicting alien reservations
117 */
118struct uwb_cnflt_alien {
119 struct uwb_rc *rc;
120 struct list_head rc_node;
121 struct uwb_mas_bm mas;
122 struct timer_list timer;
123 struct work_struct cnflt_update_work;
124};
125
126enum uwb_uwb_rsv_alloc_result {
127 UWB_RSV_ALLOC_FOUND = 0,
128 UWB_RSV_ALLOC_NOT_FOUND,
129};
130
131enum uwb_rsv_mas_status {
132 UWB_RSV_MAS_NOT_AVAIL = 1,
133 UWB_RSV_MAS_SAFE,
134 UWB_RSV_MAS_UNSAFE,
135};
136
137struct uwb_rsv_col_set_info {
138 unsigned char start_col;
139 unsigned char interval;
140 unsigned char safe_mas_per_col;
141 unsigned char unsafe_mas_per_col;
142};
143
144struct uwb_rsv_col_info {
145 unsigned char max_avail_safe;
146 unsigned char max_avail_unsafe;
147 unsigned char highest_mas[UWB_MAS_PER_ZONE];
148 struct uwb_rsv_col_set_info csi;
149};
150
151struct uwb_rsv_row_info {
152 unsigned char avail[UWB_MAS_PER_ZONE];
153 unsigned char free_rows;
154 unsigned char used_rows;
155};
156
157/*
158 * UWB MAS allocation search
159 */
160struct uwb_rsv_alloc_info {
161 unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES];
162 struct uwb_rsv_col_info ci[UWB_NUM_ZONES];
163 struct uwb_rsv_row_info ri;
164 struct uwb_mas_bm *not_available;
165 struct uwb_mas_bm *result;
166 int min_mas;
167 int max_mas;
168 int max_interval;
169 int total_allocated_mases;
170 int safe_allocated_mases;
171 int unsafe_allocated_mases;
172 int interval;
173};
109 174
175int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available,
176 struct uwb_mas_bm *result);
177void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc);
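
The allocator structures above describe the superframe as a 16-zone by 16-MAS grid
(256 MAS total, per ECMA-368) addressed as a flat byte array. A compilable
illustration of the indexing convention (constants restated locally as stand-ins for
UWB_NUM_ZONES/UWB_MAS_PER_ZONE):

#include <stdio.h>

#define NUM_ZONES    16
#define MAS_PER_ZONE 16

int main(void)
{
        unsigned char bm[NUM_ZONES * MAS_PER_ZONE] = { 0 };
        int zone = 3, mas = 5;

        /* the allocator's flat-array convention: one byte per MAS */
        bm[zone * MAS_PER_ZONE + mas] = 1;

        printf("total MAS per superframe = %d\n", NUM_ZONES * MAS_PER_ZONE);
        printf("index of (zone %d, mas %d) = %d\n",
               zone, mas, zone * MAS_PER_ZONE + mas);
        return 0;
}
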
110/* 178/*
111 * UWB Events & management daemon 179 * UWB Events & management daemon
112 */ 180 */
@@ -254,18 +322,28 @@ void uwb_rsv_init(struct uwb_rc *rc);
254int uwb_rsv_setup(struct uwb_rc *rc); 322int uwb_rsv_setup(struct uwb_rc *rc);
255void uwb_rsv_cleanup(struct uwb_rc *rc); 323void uwb_rsv_cleanup(struct uwb_rc *rc);
256void uwb_rsv_remove_all(struct uwb_rc *rc); 324void uwb_rsv_remove_all(struct uwb_rc *rc);
325void uwb_rsv_get(struct uwb_rsv *rsv);
326void uwb_rsv_put(struct uwb_rsv *rsv);
327bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
328void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
329int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
330void uwb_rsv_backoff_win_timer(unsigned long arg);
331void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
332int uwb_rsv_status(struct uwb_rsv *rsv);
333int uwb_rsv_companion_status(struct uwb_rsv *rsv);
257 334
258void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); 335void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
259void uwb_rsv_remove(struct uwb_rsv *rsv); 336void uwb_rsv_remove(struct uwb_rsv *rsv);
260struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, 337struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
261 struct uwb_ie_drp *drp_ie); 338 struct uwb_ie_drp *drp_ie);
262void uwb_rsv_sched_update(struct uwb_rc *rc); 339void uwb_rsv_sched_update(struct uwb_rc *rc);
340void uwb_rsv_queue_update(struct uwb_rc *rc);
263 341
264void uwb_drp_handle_timeout(struct uwb_rsv *rsv);
265int uwb_drp_ie_update(struct uwb_rsv *rsv); 342int uwb_drp_ie_update(struct uwb_rsv *rsv);
266void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); 343void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
267 344
268void uwb_drp_avail_init(struct uwb_rc *rc); 345void uwb_drp_avail_init(struct uwb_rc *rc);
346void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail);
269int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); 347int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
270void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); 348void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
271void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); 349void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);