Diffstat (limited to 'drivers/uwb')
41 files changed, 2517 insertions, 1763 deletions
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index 257e6908304c..2f98d080fe78 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile | |||
@@ -6,6 +6,7 @@ obj-$(CONFIG_UWB_I1480U) += i1480/ | |||
6 | 6 | ||
7 | uwb-objs := \ | 7 | uwb-objs := \ |
8 | address.o \ | 8 | address.o \ |
9 | allocator.o \ | ||
9 | beacon.o \ | 10 | beacon.o \ |
10 | driver.o \ | 11 | driver.o \ |
11 | drp.o \ | 12 | drp.o \ |
@@ -13,10 +14,12 @@ uwb-objs := \ | |||
13 | drp-ie.o \ | 14 | drp-ie.o \ |
14 | est.o \ | 15 | est.o \ |
15 | ie.o \ | 16 | ie.o \ |
17 | ie-rcv.o \ | ||
16 | lc-dev.o \ | 18 | lc-dev.o \ |
17 | lc-rc.o \ | 19 | lc-rc.o \ |
18 | neh.o \ | 20 | neh.o \ |
19 | pal.o \ | 21 | pal.o \ |
22 | radio.o \ | ||
20 | reset.o \ | 23 | reset.o \ |
21 | rsv.o \ | 24 | rsv.o \ |
22 | scan.o \ | 25 | scan.o \ |
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c index 1664ae5f1706..ad21b1d7218c 100644 --- a/drivers/uwb/address.c +++ b/drivers/uwb/address.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/random.h> | 29 | #include <linux/random.h> |
30 | #include <linux/etherdevice.h> | 30 | #include <linux/etherdevice.h> |
31 | #include <linux/uwb/debug.h> | 31 | |
32 | #include "uwb-internal.h" | 32 | #include "uwb-internal.h" |
33 | 33 | ||
34 | 34 | ||
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c new file mode 100644 index 000000000000..c8185e6b0cd5 --- /dev/null +++ b/drivers/uwb/allocator.c | |||
@@ -0,0 +1,386 @@ | |||
1 | /* | ||
2 | * UWB reservation management. | ||
3 | * | ||
4 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/uwb.h> | ||
21 | |||
22 | #include "uwb-internal.h" | ||
23 | |||
24 | static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai) | ||
25 | { | ||
26 | int col, mas, safe_mas, unsafe_mas; | ||
27 | unsigned char *bm = ai->bm; | ||
28 | struct uwb_rsv_col_info *ci = ai->ci; | ||
29 | unsigned char c; | ||
30 | |||
31 | for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) { | ||
32 | |||
33 | safe_mas = ci->csi.safe_mas_per_col; | ||
34 | unsafe_mas = ci->csi.unsafe_mas_per_col; | ||
35 | |||
36 | for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++ ) { | ||
37 | if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) { | ||
38 | |||
39 | if (safe_mas > 0) { | ||
40 | safe_mas--; | ||
41 | c = UWB_RSV_MAS_SAFE; | ||
42 | } else if (unsafe_mas > 0) { | ||
43 | unsafe_mas--; | ||
44 | c = UWB_RSV_MAS_UNSAFE; | ||
45 | } else { | ||
46 | break; | ||
47 | } | ||
48 | bm[col * UWB_MAS_PER_ZONE + mas] = c; | ||
49 | } | ||
50 | } | ||
51 | } | ||
52 | } | ||
53 | |||
54 | static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai) | ||
55 | { | ||
56 | int mas, col, rows; | ||
57 | unsigned char *bm = ai->bm; | ||
58 | struct uwb_rsv_row_info *ri = &ai->ri; | ||
59 | unsigned char c; | ||
60 | |||
61 | rows = 1; | ||
62 | c = UWB_RSV_MAS_SAFE; | ||
63 | for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) { | ||
64 | if (ri->avail[mas] == 1) { | ||
65 | |||
66 | if (rows > ri->used_rows) { | ||
67 | break; | ||
68 | } else if (rows > 7) { | ||
69 | c = UWB_RSV_MAS_UNSAFE; | ||
70 | } | ||
71 | |||
72 | for (col = 0; col < UWB_NUM_ZONES; col++) { | ||
73 | if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) { | ||
74 | bm[col * UWB_NUM_ZONES + mas] = c; | ||
75 | if(c == UWB_RSV_MAS_SAFE) | ||
76 | ai->safe_allocated_mases++; | ||
77 | else | ||
78 | ai->unsafe_allocated_mases++; | ||
79 | } | ||
80 | } | ||
81 | rows++; | ||
82 | } | ||
83 | } | ||
84 | ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Find the best column set for a given availability, interval, num safe mas and | ||
89 | * num unsafe mas. | ||
90 | * | ||
91 | * The different sets are tried in order as shown below, depending on the interval. | ||
92 | * | ||
93 | * interval = 16 | ||
94 | * deep = 0 | ||
95 | * set 1 -> { 8 } | ||
96 | * deep = 1 | ||
97 | * set 1 -> { 4 } | ||
98 | * set 2 -> { 12 } | ||
99 | * deep = 2 | ||
100 | * set 1 -> { 2 } | ||
101 | * set 2 -> { 6 } | ||
102 | * set 3 -> { 10 } | ||
103 | * set 4 -> { 14 } | ||
104 | * deep = 3 | ||
105 | * set 1 -> { 1 } | ||
106 | * set 2 -> { 3 } | ||
107 | * set 3 -> { 5 } | ||
108 | * set 4 -> { 7 } | ||
109 | * set 5 -> { 9 } | ||
110 | * set 6 -> { 11 } | ||
111 | * set 7 -> { 13 } | ||
112 | * set 8 -> { 15 } | ||
113 | * | ||
114 | * interval = 8 | ||
115 | * deep = 0 | ||
116 | * set 1 -> { 4 12 } | ||
117 | * deep = 1 | ||
118 | * set 1 -> { 2 10 } | ||
119 | * set 2 -> { 6 14 } | ||
120 | * deep = 2 | ||
121 | * set 1 -> { 1 9 } | ||
122 | * set 2 -> { 3 11 } | ||
123 | * set 3 -> { 5 13 } | ||
124 | * set 4 -> { 7 15 } | ||
125 | * | ||
126 | * interval = 4 | ||
127 | * deep = 0 | ||
128 | * set 1 -> { 2 6 10 14 } | ||
129 | * deep = 1 | ||
130 | * set 1 -> { 1 5 9 13 } | ||
131 | * set 2 -> { 3 7 11 15 } | ||
132 | * | ||
133 | * interval = 2 | ||
134 | * deep = 0 | ||
135 | * set 1 -> { 1 3 5 7 9 11 13 15 } | ||
136 | */ | ||
137 | static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval, | ||
138 | int num_safe_mas, int num_unsafe_mas) | ||
139 | { | ||
140 | struct uwb_rsv_col_info *ci = ai->ci; | ||
141 | struct uwb_rsv_col_set_info *csi = &ci->csi; | ||
142 | struct uwb_rsv_col_set_info tmp_csi; | ||
143 | int deep, set, col, start_col_deep, col_start_set; | ||
144 | int start_col, max_mas_in_set, lowest_max_mas_in_deep; | ||
145 | int n_mas; | ||
146 | int found = UWB_RSV_ALLOC_NOT_FOUND; | ||
147 | |||
148 | tmp_csi.start_col = 0; | ||
149 | start_col_deep = interval; | ||
150 | n_mas = num_unsafe_mas + num_safe_mas; | ||
151 | |||
152 | for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) { | ||
153 | start_col_deep /= 2; | ||
154 | col_start_set = 0; | ||
155 | lowest_max_mas_in_deep = UWB_MAS_PER_ZONE; | ||
156 | |||
157 | for (set = 1; set <= (1 << deep); set++) { | ||
158 | max_mas_in_set = 0; | ||
159 | start_col = start_col_deep + col_start_set; | ||
160 | for (col = start_col; col < UWB_NUM_ZONES; col += interval) { | ||
161 | |||
162 | if (ci[col].max_avail_safe >= num_safe_mas && | ||
163 | ci[col].max_avail_unsafe >= n_mas) { | ||
164 | if (ci[col].highest_mas[n_mas] > max_mas_in_set) | ||
165 | max_mas_in_set = ci[col].highest_mas[n_mas]; | ||
166 | } else { | ||
167 | max_mas_in_set = 0; | ||
168 | break; | ||
169 | } | ||
170 | } | ||
171 | if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) { | ||
172 | lowest_max_mas_in_deep = max_mas_in_set; | ||
173 | |||
174 | tmp_csi.start_col = start_col; | ||
175 | } | ||
176 | col_start_set += (interval >> deep); | ||
177 | } | ||
178 | |||
179 | if (lowest_max_mas_in_deep < 8) { | ||
180 | csi->start_col = tmp_csi.start_col; | ||
181 | found = UWB_RSV_ALLOC_FOUND; | ||
182 | break; | ||
183 | } else if ((lowest_max_mas_in_deep > 8) && | ||
184 | (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) && | ||
185 | (found == UWB_RSV_ALLOC_NOT_FOUND)) { | ||
186 | csi->start_col = tmp_csi.start_col; | ||
187 | found = UWB_RSV_ALLOC_FOUND; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | if (found == UWB_RSV_ALLOC_FOUND) { | ||
192 | csi->interval = interval; | ||
193 | csi->safe_mas_per_col = num_safe_mas; | ||
194 | csi->unsafe_mas_per_col = num_unsafe_mas; | ||
195 | |||
196 | ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas; | ||
197 | ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas; | ||
198 | ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; | ||
199 | ai->interval = interval; | ||
200 | } | ||
201 | return found; | ||
202 | } | ||
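The column-set ordering documented in the comment above uwb_rsv_find_best_column_set() can be reproduced with the small standalone sketch below, which mirrors the function's deep/set loops (illustrative only, not part of the patch; it assumes the ECMA-368 figure of 16 zones per superframe):

#include <stdio.h>

#define NUM_ZONES 16    /* assumed equal to UWB_NUM_ZONES */

int main(void)
{
        static const int intervals[] = { 16, 8, 4, 2 };
        int i, deep, set, col, start_col_deep, col_start_set, start_col;

        for (i = 0; i < 4; i++) {
                int interval = intervals[i];

                printf("interval = %d\n", interval);
                start_col_deep = interval;
                for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
                        start_col_deep /= 2;
                        col_start_set = 0;
                        printf("  deep = %d\n", deep);
                        for (set = 1; set <= (1 << deep); set++) {
                                start_col = start_col_deep + col_start_set;
                                printf("    set %d -> {", set);
                                for (col = start_col; col < NUM_ZONES; col += interval)
                                        printf(" %d", col);
                                printf(" }\n");
                                col_start_set += interval >> deep;
                        }
                }
        }
        return 0;
}

Running it prints exactly the tables listed in the comment, which is a quick way to sanity-check any change to the loop bounds.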
203 | |||
204 | static void get_row_descriptors(struct uwb_rsv_alloc_info *ai) | ||
205 | { | ||
206 | unsigned char *bm = ai->bm; | ||
207 | struct uwb_rsv_row_info *ri = &ai->ri; | ||
208 | int col, mas; | ||
209 | |||
210 | ri->free_rows = 16; | ||
211 | for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { | ||
212 | ri->avail[mas] = 1; | ||
213 | for (col = 1; col < UWB_NUM_ZONES; col++) { | ||
214 | if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) { | ||
215 | ri->free_rows--; | ||
216 | ri->avail[mas]=0; | ||
217 | break; | ||
218 | } | ||
219 | } | ||
220 | } | ||
221 | } | ||
222 | |||
223 | static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci) | ||
224 | { | ||
225 | int mas; | ||
226 | int block_count = 0, start_block = 0; | ||
227 | int previous_avail = 0; | ||
228 | int available = 0; | ||
229 | int safe_mas_in_row[UWB_MAS_PER_ZONE] = { | ||
230 | 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, | ||
231 | }; | ||
232 | |||
233 | rci->max_avail_safe = 0; | ||
234 | |||
235 | for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { | ||
236 | if (!bm[column * UWB_NUM_ZONES + mas]) { | ||
237 | available++; | ||
238 | rci->max_avail_unsafe = available; | ||
239 | |||
240 | rci->highest_mas[available] = mas; | ||
241 | |||
242 | if (previous_avail) { | ||
243 | block_count++; | ||
244 | if ((block_count > safe_mas_in_row[start_block]) && | ||
245 | (!rci->max_avail_safe)) | ||
246 | rci->max_avail_safe = available - 1; | ||
247 | } else { | ||
248 | previous_avail = 1; | ||
249 | start_block = mas; | ||
250 | block_count = 1; | ||
251 | } | ||
252 | } else { | ||
253 | previous_avail = 0; | ||
254 | } | ||
255 | } | ||
256 | if (!rci->max_avail_safe) | ||
257 | rci->max_avail_safe = rci->max_avail_unsafe; | ||
258 | } | ||
259 | |||
260 | static void get_column_descriptors(struct uwb_rsv_alloc_info *ai) | ||
261 | { | ||
262 | unsigned char *bm = ai->bm; | ||
263 | struct uwb_rsv_col_info *ci = ai->ci; | ||
264 | int col; | ||
265 | |||
266 | for (col = 1; col < UWB_NUM_ZONES; col++) { | ||
267 | uwb_rsv_fill_column_info(bm, col, &ci[col]); | ||
268 | } | ||
269 | } | ||
270 | |||
271 | static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai) | ||
272 | { | ||
273 | int n_rows; | ||
274 | int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW; | ||
275 | int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW; | ||
276 | if (ai->min_mas % UWB_USABLE_MAS_PER_ROW) | ||
277 | min_rows++; | ||
278 | for (n_rows = max_rows; n_rows >= min_rows; n_rows--) { | ||
279 | if (n_rows <= ai->ri.free_rows) { | ||
280 | ai->ri.used_rows = n_rows; | ||
281 | ai->interval = 1; /* row reservation */ | ||
282 | uwb_rsv_fill_row_alloc(ai); | ||
283 | return UWB_RSV_ALLOC_FOUND; | ||
284 | } | ||
285 | } | ||
286 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
287 | } | ||
288 | |||
289 | static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval) | ||
290 | { | ||
291 | int n_safe, n_unsafe, n_mas; | ||
292 | int n_column = UWB_NUM_ZONES / interval; | ||
293 | int max_per_zone = ai->max_mas / n_column; | ||
294 | int min_per_zone = ai->min_mas / n_column; | ||
295 | |||
296 | if (ai->min_mas % n_column) | ||
297 | min_per_zone++; | ||
298 | |||
299 | if (min_per_zone > UWB_MAS_PER_ZONE) { | ||
300 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
301 | } | ||
302 | |||
303 | if (max_per_zone > UWB_MAS_PER_ZONE) { | ||
304 | max_per_zone = UWB_MAS_PER_ZONE; | ||
305 | } | ||
306 | |||
307 | for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) { | ||
308 | if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND) | ||
309 | continue; | ||
310 | for (n_safe = n_mas; n_safe >= 0; n_safe--) { | ||
311 | n_unsafe = n_mas - n_safe; | ||
312 | if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) { | ||
313 | uwb_rsv_fill_column_alloc(ai); | ||
314 | return UWB_RSV_ALLOC_FOUND; | ||
315 | } | ||
316 | } | ||
317 | } | ||
318 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
319 | } | ||
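As a worked example of the per-zone arithmetic in uwb_rsv_find_best_col_alloc() (the request sizes are invented; UWB_NUM_ZONES = 16 and UWB_MAS_PER_ZONE = 16 are the ECMA-368 values):

    min_mas = 18, max_mas = 64, interval = 4
    n_column     = UWB_NUM_ZONES / interval = 16 / 4          -> 4
    min_per_zone = 18 / 4 = 4, plus 1 for the remainder       -> 5
    max_per_zone = 64 / 4 = 16, already <= UWB_MAS_PER_ZONE   -> 16
    n_mas loop   : 16, 15, ..., 5 until a column set is found
    n_safe loop  : n_mas, n_mas - 1, ..., 0 with n_unsafe = n_mas - n_safe

so the allocator prefers packing as many MAS per zone as possible and, within that, as many safe MAS as possible.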
320 | |||
321 | int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, | ||
322 | struct uwb_mas_bm *result) | ||
323 | { | ||
324 | struct uwb_rsv_alloc_info *ai; | ||
325 | int interval; | ||
326 | int bit_index; | ||
327 | |||
328 | ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); | ||
329 | |||
330 | ai->min_mas = rsv->min_mas; | ||
331 | ai->max_mas = rsv->max_mas; | ||
332 | ai->max_interval = rsv->max_interval; | ||
333 | |||
334 | |||
335 | /* fill the not available vector from the available bm */ | ||
336 | for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { | ||
337 | if (!test_bit(bit_index, available->bm)) | ||
338 | ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL; | ||
339 | } | ||
340 | |||
341 | if (ai->max_interval == 1) { | ||
342 | get_row_descriptors(ai); | ||
343 | if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) | ||
344 | goto alloc_found; | ||
345 | else | ||
346 | goto alloc_not_found; | ||
347 | } | ||
348 | |||
349 | get_column_descriptors(ai); | ||
350 | |||
351 | for (interval = 16; interval >= 2; interval>>=1) { | ||
352 | if (interval > ai->max_interval) | ||
353 | continue; | ||
354 | if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND) | ||
355 | goto alloc_found; | ||
356 | } | ||
357 | |||
358 | /* try row reservation if no column is found */ | ||
359 | get_row_descriptors(ai); | ||
360 | if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) | ||
361 | goto alloc_found; | ||
362 | else | ||
363 | goto alloc_not_found; | ||
364 | |||
365 | alloc_found: | ||
366 | bitmap_zero(result->bm, UWB_NUM_MAS); | ||
367 | bitmap_zero(result->unsafe_bm, UWB_NUM_MAS); | ||
368 | /* fill the safe and unsafe bitmaps */ | ||
369 | for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { | ||
370 | if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE) | ||
371 | set_bit(bit_index, result->bm); | ||
372 | else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE) | ||
373 | set_bit(bit_index, result->unsafe_bm); | ||
374 | } | ||
375 | bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS); | ||
376 | |||
377 | result->safe = ai->safe_allocated_mases; | ||
378 | result->unsafe = ai->unsafe_allocated_mases; | ||
379 | |||
380 | kfree(ai); | ||
381 | return UWB_RSV_ALLOC_FOUND; | ||
382 | |||
383 | alloc_not_found: | ||
384 | kfree(ai); | ||
385 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
386 | } | ||
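A minimal sketch of how a caller might drive the new allocator (illustrative only: example_alloc() is invented here, error handling is elided, and a real caller would presumably run under the reservation locking used elsewhere in this patch):

static int example_alloc(struct uwb_rsv *rsv)
{
        struct uwb_mas_bm available;
        struct uwb_mas_bm result;

        /* Current availability (global & local & pending) of the radio
         * controller that owns the reservation. */
        uwb_drp_available(rsv->rc, &available);

        /* Ask for a row or column allocation that satisfies rsv->min_mas,
         * rsv->max_mas and rsv->max_interval. */
        if (uwb_rsv_find_best_allocation(rsv, &available, &result)
            == UWB_RSV_ALLOC_NOT_FOUND)
                return -EBUSY;

        /* result.bm holds every allocated MAS; result.unsafe_bm the
         * subset that exceeds the safe per-zone/per-row limits. */
        bitmap_copy(rsv->mas.bm, result.bm, UWB_NUM_MAS);
        return 0;
}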
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c index 46b18eec5026..36bc3158006f 100644 --- a/drivers/uwb/beacon.c +++ b/drivers/uwb/beacon.c | |||
@@ -22,19 +22,16 @@ | |||
22 | * | 22 | * |
23 | * FIXME: docs | 23 | * FIXME: docs |
24 | */ | 24 | */ |
25 | |||
26 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
27 | #include <linux/init.h> | 26 | #include <linux/init.h> |
28 | #include <linux/module.h> | 27 | #include <linux/module.h> |
29 | #include <linux/device.h> | 28 | #include <linux/device.h> |
30 | #include <linux/err.h> | 29 | #include <linux/err.h> |
31 | #include <linux/kdev_t.h> | 30 | #include <linux/kdev_t.h> |
32 | #include "uwb-internal.h" | ||
33 | 31 | ||
34 | #define D_LOCAL 0 | 32 | #include "uwb-internal.h" |
35 | #include <linux/uwb/debug.h> | ||
36 | 33 | ||
37 | /** Start Beaconing command structure */ | 34 | /* Start Beaconing command structure */ |
38 | struct uwb_rc_cmd_start_beacon { | 35 | struct uwb_rc_cmd_start_beacon { |
39 | struct uwb_rccb rccb; | 36 | struct uwb_rccb rccb; |
40 | __le16 wBPSTOffset; | 37 | __le16 wBPSTOffset; |
@@ -119,7 +116,6 @@ int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) | |||
119 | int result; | 116 | int result; |
120 | struct device *dev = &rc->uwb_dev.dev; | 117 | struct device *dev = &rc->uwb_dev.dev; |
121 | 118 | ||
122 | mutex_lock(&rc->uwb_dev.mutex); | ||
123 | if (channel < 0) | 119 | if (channel < 0) |
124 | channel = -1; | 120 | channel = -1; |
125 | if (channel == -1) | 121 | if (channel == -1) |
@@ -128,7 +124,7 @@ int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) | |||
128 | /* channel >= 0...dah */ | 124 | /* channel >= 0...dah */ |
129 | result = uwb_rc_start_beacon(rc, bpst_offset, channel); | 125 | result = uwb_rc_start_beacon(rc, bpst_offset, channel); |
130 | if (result < 0) | 126 | if (result < 0) |
131 | goto out_up; | 127 | return result; |
132 | if (le16_to_cpu(rc->ies->wIELength) > 0) { | 128 | if (le16_to_cpu(rc->ies->wIELength) > 0) { |
133 | result = uwb_rc_set_ie(rc, rc->ies); | 129 | result = uwb_rc_set_ie(rc, rc->ies); |
134 | if (result < 0) { | 130 | if (result < 0) { |
@@ -137,19 +133,12 @@ int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) | |||
137 | result = uwb_rc_stop_beacon(rc); | 133 | result = uwb_rc_stop_beacon(rc); |
138 | channel = -1; | 134 | channel = -1; |
139 | bpst_offset = 0; | 135 | bpst_offset = 0; |
140 | } else | 136 | } |
141 | result = 0; | ||
142 | } | 137 | } |
143 | } | 138 | } |
144 | 139 | ||
145 | if (result < 0) | 140 | if (result >= 0) |
146 | goto out_up; | 141 | rc->beaconing = channel; |
147 | rc->beaconing = channel; | ||
148 | |||
149 | uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE); | ||
150 | |||
151 | out_up: | ||
152 | mutex_unlock(&rc->uwb_dev.mutex); | ||
153 | return result; | 142 | return result; |
154 | } | 143 | } |
155 | 144 | ||
@@ -168,12 +157,6 @@ out_up: | |||
168 | * FIXME: use something faster for search than a list | 157 | * FIXME: use something faster for search than a list |
169 | */ | 158 | */ |
170 | 159 | ||
171 | struct uwb_beca uwb_beca = { | ||
172 | .list = LIST_HEAD_INIT(uwb_beca.list), | ||
173 | .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex) | ||
174 | }; | ||
175 | |||
176 | |||
177 | void uwb_bce_kfree(struct kref *_bce) | 160 | void uwb_bce_kfree(struct kref *_bce) |
178 | { | 161 | { |
179 | struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); | 162 | struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); |
@@ -185,13 +168,11 @@ void uwb_bce_kfree(struct kref *_bce) | |||
185 | 168 | ||
186 | /* Find a beacon by dev addr in the cache */ | 169 | /* Find a beacon by dev addr in the cache */ |
187 | static | 170 | static |
188 | struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr) | 171 | struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc, |
172 | const struct uwb_dev_addr *dev_addr) | ||
189 | { | 173 | { |
190 | struct uwb_beca_e *bce, *next; | 174 | struct uwb_beca_e *bce, *next; |
191 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 175 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { |
192 | d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n", | ||
193 | dev_addr->data[0], dev_addr->data[1], | ||
194 | bce->dev_addr.data[0], bce->dev_addr.data[1]); | ||
195 | if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) | 176 | if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) |
196 | goto out; | 177 | goto out; |
197 | } | 178 | } |
@@ -202,10 +183,11 @@ out: | |||
202 | 183 | ||
203 | /* Find a beacon by dev addr in the cache */ | 184 | /* Find a beacon by dev addr in the cache */ |
204 | static | 185 | static |
205 | struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr) | 186 | struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc, |
187 | const struct uwb_mac_addr *mac_addr) | ||
206 | { | 188 | { |
207 | struct uwb_beca_e *bce, *next; | 189 | struct uwb_beca_e *bce, *next; |
208 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 190 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { |
209 | if (!memcmp(bce->mac_addr, mac_addr->data, | 191 | if (!memcmp(bce->mac_addr, mac_addr->data, |
210 | sizeof(struct uwb_mac_addr))) | 192 | sizeof(struct uwb_mac_addr))) |
211 | goto out; | 193 | goto out; |
@@ -229,11 +211,11 @@ struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, | |||
229 | struct uwb_dev *found = NULL; | 211 | struct uwb_dev *found = NULL; |
230 | struct uwb_beca_e *bce; | 212 | struct uwb_beca_e *bce; |
231 | 213 | ||
232 | mutex_lock(&uwb_beca.mutex); | 214 | mutex_lock(&rc->uwb_beca.mutex); |
233 | bce = __uwb_beca_find_bydev(devaddr); | 215 | bce = __uwb_beca_find_bydev(rc, devaddr); |
234 | if (bce) | 216 | if (bce) |
235 | found = uwb_dev_try_get(rc, bce->uwb_dev); | 217 | found = uwb_dev_try_get(rc, bce->uwb_dev); |
236 | mutex_unlock(&uwb_beca.mutex); | 218 | mutex_unlock(&rc->uwb_beca.mutex); |
237 | 219 | ||
238 | return found; | 220 | return found; |
239 | } | 221 | } |
@@ -249,11 +231,11 @@ struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, | |||
249 | struct uwb_dev *found = NULL; | 231 | struct uwb_dev *found = NULL; |
250 | struct uwb_beca_e *bce; | 232 | struct uwb_beca_e *bce; |
251 | 233 | ||
252 | mutex_lock(&uwb_beca.mutex); | 234 | mutex_lock(&rc->uwb_beca.mutex); |
253 | bce = __uwb_beca_find_bymac(macaddr); | 235 | bce = __uwb_beca_find_bymac(rc, macaddr); |
254 | if (bce) | 236 | if (bce) |
255 | found = uwb_dev_try_get(rc, bce->uwb_dev); | 237 | found = uwb_dev_try_get(rc, bce->uwb_dev); |
256 | mutex_unlock(&uwb_beca.mutex); | 238 | mutex_unlock(&rc->uwb_beca.mutex); |
257 | 239 | ||
258 | return found; | 240 | return found; |
259 | } | 241 | } |
@@ -274,7 +256,9 @@ static void uwb_beca_e_init(struct uwb_beca_e *bce) | |||
274 | * @bf: Beacon frame (part of b, really) | 256 | * @bf: Beacon frame (part of b, really) |
275 | * @ts_jiffies: Timestamp (in jiffies) when the beacon was received | 257 | * @ts_jiffies: Timestamp (in jiffies) when the beacon was received |
276 | */ | 258 | */ |
277 | struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, | 259 | static |
260 | struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc, | ||
261 | struct uwb_rc_evt_beacon *be, | ||
278 | struct uwb_beacon_frame *bf, | 262 | struct uwb_beacon_frame *bf, |
279 | unsigned long ts_jiffies) | 263 | unsigned long ts_jiffies) |
280 | { | 264 | { |
@@ -286,7 +270,7 @@ struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, | |||
286 | uwb_beca_e_init(bce); | 270 | uwb_beca_e_init(bce); |
287 | bce->ts_jiffies = ts_jiffies; | 271 | bce->ts_jiffies = ts_jiffies; |
288 | bce->uwb_dev = NULL; | 272 | bce->uwb_dev = NULL; |
289 | list_add(&bce->node, &uwb_beca.list); | 273 | list_add(&bce->node, &rc->uwb_beca.list); |
290 | return bce; | 274 | return bce; |
291 | } | 275 | } |
292 | 276 | ||
@@ -295,33 +279,32 @@ struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, | |||
295 | * | 279 | * |
296 | * Remove associated devices too. | 280 | * Remove associated devices too. |
297 | */ | 281 | */ |
298 | void uwb_beca_purge(void) | 282 | void uwb_beca_purge(struct uwb_rc *rc) |
299 | { | 283 | { |
300 | struct uwb_beca_e *bce, *next; | 284 | struct uwb_beca_e *bce, *next; |
301 | unsigned long expires; | 285 | unsigned long expires; |
302 | 286 | ||
303 | mutex_lock(&uwb_beca.mutex); | 287 | mutex_lock(&rc->uwb_beca.mutex); |
304 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 288 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { |
305 | expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); | 289 | expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); |
306 | if (time_after(jiffies, expires)) { | 290 | if (time_after(jiffies, expires)) { |
307 | uwbd_dev_offair(bce); | 291 | uwbd_dev_offair(bce); |
308 | list_del(&bce->node); | ||
309 | uwb_bce_put(bce); | ||
310 | } | 292 | } |
311 | } | 293 | } |
312 | mutex_unlock(&uwb_beca.mutex); | 294 | mutex_unlock(&rc->uwb_beca.mutex); |
313 | } | 295 | } |
314 | 296 | ||
315 | /* Clean up the whole beacon cache. Called on shutdown */ | 297 | /* Clean up the whole beacon cache. Called on shutdown */ |
316 | void uwb_beca_release(void) | 298 | void uwb_beca_release(struct uwb_rc *rc) |
317 | { | 299 | { |
318 | struct uwb_beca_e *bce, *next; | 300 | struct uwb_beca_e *bce, *next; |
319 | mutex_lock(&uwb_beca.mutex); | 301 | |
320 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 302 | mutex_lock(&rc->uwb_beca.mutex); |
303 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { | ||
321 | list_del(&bce->node); | 304 | list_del(&bce->node); |
322 | uwb_bce_put(bce); | 305 | uwb_bce_put(bce); |
323 | } | 306 | } |
324 | mutex_unlock(&uwb_beca.mutex); | 307 | mutex_unlock(&rc->uwb_beca.mutex); |
325 | } | 308 | } |
326 | 309 | ||
327 | static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, | 310 | static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, |
@@ -349,22 +332,22 @@ ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce, | |||
349 | ssize_t result = 0; | 332 | ssize_t result = 0; |
350 | struct uwb_rc_evt_beacon *be; | 333 | struct uwb_rc_evt_beacon *be; |
351 | struct uwb_beacon_frame *bf; | 334 | struct uwb_beacon_frame *bf; |
352 | struct uwb_buf_ctx ctx = { | 335 | int ies_len; |
353 | .buf = buf, | 336 | struct uwb_ie_hdr *ies; |
354 | .bytes = 0, | ||
355 | .size = size | ||
356 | }; | ||
357 | 337 | ||
358 | mutex_lock(&bce->mutex); | 338 | mutex_lock(&bce->mutex); |
339 | |||
359 | be = bce->be; | 340 | be = bce->be; |
360 | if (be == NULL) | 341 | if (be) { |
361 | goto out; | 342 | bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo; |
362 | bf = (void *) be->BeaconInfo; | 343 | ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame); |
363 | uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx, | 344 | ies = (struct uwb_ie_hdr *)bf->IEData; |
364 | bf->IEData, be->wBeaconInfoLength - sizeof(*bf)); | 345 | |
365 | result = ctx.bytes; | 346 | result = uwb_ie_dump_hex(ies, ies_len, buf, size); |
366 | out: | 347 | } |
348 | |||
367 | mutex_unlock(&bce->mutex); | 349 | mutex_unlock(&bce->mutex); |
350 | |||
368 | return result; | 351 | return result; |
369 | } | 352 | } |
370 | 353 | ||
@@ -437,18 +420,18 @@ int uwbd_evt_handle_rc_beacon(struct uwb_event *evt) | |||
437 | if (uwb_mac_addr_bcast(&bf->Device_Identifier)) | 420 | if (uwb_mac_addr_bcast(&bf->Device_Identifier)) |
438 | return 0; | 421 | return 0; |
439 | 422 | ||
440 | mutex_lock(&uwb_beca.mutex); | 423 | mutex_lock(&rc->uwb_beca.mutex); |
441 | bce = __uwb_beca_find_bymac(&bf->Device_Identifier); | 424 | bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier); |
442 | if (bce == NULL) { | 425 | if (bce == NULL) { |
443 | /* Not in there, a new device is pinging */ | 426 | /* Not in there, a new device is pinging */ |
444 | uwb_beacon_print(evt->rc, be, bf); | 427 | uwb_beacon_print(evt->rc, be, bf); |
445 | bce = __uwb_beca_add(be, bf, evt->ts_jiffies); | 428 | bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies); |
446 | if (bce == NULL) { | 429 | if (bce == NULL) { |
447 | mutex_unlock(&uwb_beca.mutex); | 430 | mutex_unlock(&rc->uwb_beca.mutex); |
448 | return -ENOMEM; | 431 | return -ENOMEM; |
449 | } | 432 | } |
450 | } | 433 | } |
451 | mutex_unlock(&uwb_beca.mutex); | 434 | mutex_unlock(&rc->uwb_beca.mutex); |
452 | 435 | ||
453 | mutex_lock(&bce->mutex); | 436 | mutex_lock(&bce->mutex); |
454 | /* purge old beacon data */ | 437 | /* purge old beacon data */ |
@@ -588,19 +571,6 @@ error: | |||
588 | return result; | 571 | return result; |
589 | } | 572 | } |
590 | 573 | ||
591 | /** | ||
592 | * uwb_bg_joined - is the RC in a beacon group? | ||
593 | * @rc: the radio controller | ||
594 | * | ||
595 | * Returns true if the radio controller is in a beacon group (even if | ||
596 | * it's the sole member). | ||
597 | */ | ||
598 | int uwb_bg_joined(struct uwb_rc *rc) | ||
599 | { | ||
600 | return rc->beaconing != -1; | ||
601 | } | ||
602 | EXPORT_SYMBOL_GPL(uwb_bg_joined); | ||
603 | |||
604 | /* | 574 | /* |
605 | * Print beaconing state. | 575 | * Print beaconing state. |
606 | */ | 576 | */ |
@@ -619,9 +589,6 @@ static ssize_t uwb_rc_beacon_show(struct device *dev, | |||
619 | 589 | ||
620 | /* | 590 | /* |
621 | * Start beaconing on the specified channel, or stop beaconing. | 591 | * Start beaconing on the specified channel, or stop beaconing. |
622 | * | ||
623 | * The BPST offset of when to start searching for a beacon group to | ||
624 | * join may be specified. | ||
625 | */ | 592 | */ |
626 | static ssize_t uwb_rc_beacon_store(struct device *dev, | 593 | static ssize_t uwb_rc_beacon_store(struct device *dev, |
627 | struct device_attribute *attr, | 594 | struct device_attribute *attr, |
@@ -630,12 +597,11 @@ static ssize_t uwb_rc_beacon_store(struct device *dev, | |||
630 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | 597 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); |
631 | struct uwb_rc *rc = uwb_dev->rc; | 598 | struct uwb_rc *rc = uwb_dev->rc; |
632 | int channel; | 599 | int channel; |
633 | unsigned bpst_offset = 0; | ||
634 | ssize_t result = -EINVAL; | 600 | ssize_t result = -EINVAL; |
635 | 601 | ||
636 | result = sscanf(buf, "%d %u\n", &channel, &bpst_offset); | 602 | result = sscanf(buf, "%d", &channel); |
637 | if (result >= 1) | 603 | if (result >= 1) |
638 | result = uwb_rc_beacon(rc, channel, bpst_offset); | 604 | result = uwb_radio_force_channel(rc, channel); |
639 | 605 | ||
640 | return result < 0 ? result : size; | 606 | return result < 0 ? result : size; |
641 | } | 607 | } |
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c index 521cdeb84971..da77e41de990 100644 --- a/drivers/uwb/driver.c +++ b/drivers/uwb/driver.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #include <linux/err.h> | 53 | #include <linux/err.h> |
54 | #include <linux/kdev_t.h> | 54 | #include <linux/kdev_t.h> |
55 | #include <linux/random.h> | 55 | #include <linux/random.h> |
56 | #include <linux/uwb/debug.h> | 56 | |
57 | #include "uwb-internal.h" | 57 | #include "uwb-internal.h" |
58 | 58 | ||
59 | 59 | ||
@@ -118,7 +118,6 @@ static int __init uwb_subsys_init(void) | |||
118 | result = class_register(&uwb_rc_class); | 118 | result = class_register(&uwb_rc_class); |
119 | if (result < 0) | 119 | if (result < 0) |
120 | goto error_uwb_rc_class_register; | 120 | goto error_uwb_rc_class_register; |
121 | uwbd_start(); | ||
122 | uwb_dbg_init(); | 121 | uwb_dbg_init(); |
123 | return 0; | 122 | return 0; |
124 | 123 | ||
@@ -132,7 +131,6 @@ module_init(uwb_subsys_init); | |||
132 | static void __exit uwb_subsys_exit(void) | 131 | static void __exit uwb_subsys_exit(void) |
133 | { | 132 | { |
134 | uwb_dbg_exit(); | 133 | uwb_dbg_exit(); |
135 | uwbd_stop(); | ||
136 | class_unregister(&uwb_rc_class); | 134 | class_unregister(&uwb_rc_class); |
137 | uwb_est_destroy(); | 135 | uwb_est_destroy(); |
138 | return; | 136 | return; |
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c index 3febd8552808..40a540a5a72e 100644 --- a/drivers/uwb/drp-avail.c +++ b/drivers/uwb/drp-avail.c | |||
@@ -58,7 +58,7 @@ void uwb_drp_avail_init(struct uwb_rc *rc) | |||
58 | * | 58 | * |
59 | * avail = global & local & pending | 59 | * avail = global & local & pending |
60 | */ | 60 | */ |
61 | static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) | 61 | void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) |
62 | { | 62 | { |
63 | bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); | 63 | bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); |
64 | bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); | 64 | bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); |
@@ -105,6 +105,7 @@ void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas) | |||
105 | bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); | 105 | bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); |
106 | bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); | 106 | bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); |
107 | rc->drp_avail.ie_valid = false; | 107 | rc->drp_avail.ie_valid = false; |
108 | uwb_rsv_handle_drp_avail_change(rc); | ||
108 | } | 109 | } |
109 | 110 | ||
110 | /** | 111 | /** |
@@ -280,6 +281,7 @@ int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt) | |||
280 | mutex_lock(&rc->rsvs_mutex); | 281 | mutex_lock(&rc->rsvs_mutex); |
281 | bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); | 282 | bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); |
282 | rc->drp_avail.ie_valid = false; | 283 | rc->drp_avail.ie_valid = false; |
284 | uwb_rsv_handle_drp_avail_change(rc); | ||
283 | mutex_unlock(&rc->rsvs_mutex); | 285 | mutex_unlock(&rc->rsvs_mutex); |
284 | 286 | ||
285 | uwb_rsv_sched_update(rc); | 287 | uwb_rsv_sched_update(rc); |
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c index 882724c5f126..2840d7bf9e67 100644 --- a/drivers/uwb/drp-ie.c +++ b/drivers/uwb/drp-ie.c | |||
@@ -16,13 +16,102 @@ | |||
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #include <linux/version.h> | ||
20 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
21 | #include <linux/random.h> | 20 | #include <linux/random.h> |
22 | #include <linux/uwb.h> | 21 | #include <linux/uwb.h> |
23 | 22 | ||
24 | #include "uwb-internal.h" | 23 | #include "uwb-internal.h" |
25 | 24 | ||
25 | |||
26 | /* | ||
27 | * Return the reason code for a reservation's DRP IE. | ||
28 | */ | ||
29 | int uwb_rsv_reason_code(struct uwb_rsv *rsv) | ||
30 | { | ||
31 | static const int reason_codes[] = { | ||
32 | [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, | ||
33 | [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, | ||
34 | [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, | ||
35 | [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, | ||
36 | [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, | ||
37 | [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, | ||
38 | [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, | ||
39 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, | ||
40 | [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, | ||
41 | [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, | ||
42 | [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, | ||
43 | [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, | ||
44 | [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, | ||
45 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, | ||
46 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, | ||
47 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, | ||
48 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, | ||
49 | }; | ||
50 | |||
51 | return reason_codes[rsv->state]; | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Return the reason code for a reservation's companion DRP IE. | ||
56 | */ | ||
57 | int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) | ||
58 | { | ||
59 | static const int companion_reason_codes[] = { | ||
60 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, | ||
61 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, | ||
62 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, | ||
63 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, | ||
64 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, | ||
65 | }; | ||
66 | |||
67 | return companion_reason_codes[rsv->state]; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Return the status bit for a reservation's DRP IE. | ||
72 | */ | ||
73 | int uwb_rsv_status(struct uwb_rsv *rsv) | ||
74 | { | ||
75 | static const int statuses[] = { | ||
76 | [UWB_RSV_STATE_O_INITIATED] = 0, | ||
77 | [UWB_RSV_STATE_O_PENDING] = 0, | ||
78 | [UWB_RSV_STATE_O_MODIFIED] = 1, | ||
79 | [UWB_RSV_STATE_O_ESTABLISHED] = 1, | ||
80 | [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, | ||
81 | [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, | ||
82 | [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, | ||
83 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, | ||
84 | [UWB_RSV_STATE_T_ACCEPTED] = 1, | ||
85 | [UWB_RSV_STATE_T_CONFLICT] = 0, | ||
86 | [UWB_RSV_STATE_T_PENDING] = 0, | ||
87 | [UWB_RSV_STATE_T_DENIED] = 0, | ||
88 | [UWB_RSV_STATE_T_RESIZED] = 1, | ||
89 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, | ||
90 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, | ||
91 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, | ||
92 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, | ||
93 | |||
94 | }; | ||
95 | |||
96 | return statuses[rsv->state]; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Return the status bit for a reservation's companion DRP IE. | ||
101 | */ | ||
102 | int uwb_rsv_companion_status(struct uwb_rsv *rsv) | ||
103 | { | ||
104 | static const int companion_statuses[] = { | ||
105 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, | ||
106 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, | ||
107 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, | ||
108 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, | ||
109 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, | ||
110 | }; | ||
111 | |||
112 | return companion_statuses[rsv->state]; | ||
113 | } | ||
114 | |||
26 | /* | 115 | /* |
27 | * Allocate a DRP IE. | 116 | * Allocate a DRP IE. |
28 | * | 117 | * |
@@ -34,16 +123,12 @@ | |||
34 | static struct uwb_ie_drp *uwb_drp_ie_alloc(void) | 123 | static struct uwb_ie_drp *uwb_drp_ie_alloc(void) |
35 | { | 124 | { |
36 | struct uwb_ie_drp *drp_ie; | 125 | struct uwb_ie_drp *drp_ie; |
37 | unsigned tiebreaker; | ||
38 | 126 | ||
39 | drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + | 127 | drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + |
40 | UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), | 128 | UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), |
41 | GFP_KERNEL); | 129 | GFP_KERNEL); |
42 | if (drp_ie) { | 130 | if (drp_ie) { |
43 | drp_ie->hdr.element_id = UWB_IE_DRP; | 131 | drp_ie->hdr.element_id = UWB_IE_DRP; |
44 | |||
45 | get_random_bytes(&tiebreaker, sizeof(unsigned)); | ||
46 | uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1); | ||
47 | } | 132 | } |
48 | return drp_ie; | 133 | return drp_ie; |
49 | } | 134 | } |
@@ -104,43 +189,17 @@ static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie, | |||
104 | */ | 189 | */ |
105 | int uwb_drp_ie_update(struct uwb_rsv *rsv) | 190 | int uwb_drp_ie_update(struct uwb_rsv *rsv) |
106 | { | 191 | { |
107 | struct device *dev = &rsv->rc->uwb_dev.dev; | ||
108 | struct uwb_ie_drp *drp_ie; | 192 | struct uwb_ie_drp *drp_ie; |
109 | int reason_code, status; | 193 | struct uwb_rsv_move *mv; |
194 | int unsafe; | ||
110 | 195 | ||
111 | switch (rsv->state) { | 196 | if (rsv->state == UWB_RSV_STATE_NONE) { |
112 | case UWB_RSV_STATE_NONE: | ||
113 | kfree(rsv->drp_ie); | 197 | kfree(rsv->drp_ie); |
114 | rsv->drp_ie = NULL; | 198 | rsv->drp_ie = NULL; |
115 | return 0; | 199 | return 0; |
116 | case UWB_RSV_STATE_O_INITIATED: | ||
117 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
118 | status = 0; | ||
119 | break; | ||
120 | case UWB_RSV_STATE_O_PENDING: | ||
121 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
122 | status = 0; | ||
123 | break; | ||
124 | case UWB_RSV_STATE_O_MODIFIED: | ||
125 | reason_code = UWB_DRP_REASON_MODIFIED; | ||
126 | status = 1; | ||
127 | break; | ||
128 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
129 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
130 | status = 1; | ||
131 | break; | ||
132 | case UWB_RSV_STATE_T_ACCEPTED: | ||
133 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
134 | status = 1; | ||
135 | break; | ||
136 | case UWB_RSV_STATE_T_DENIED: | ||
137 | reason_code = UWB_DRP_REASON_DENIED; | ||
138 | status = 0; | ||
139 | break; | ||
140 | default: | ||
141 | dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state); | ||
142 | return -EINVAL; | ||
143 | } | 200 | } |
201 | |||
202 | unsafe = rsv->mas.unsafe ? 1 : 0; | ||
144 | 203 | ||
145 | if (rsv->drp_ie == NULL) { | 204 | if (rsv->drp_ie == NULL) { |
146 | rsv->drp_ie = uwb_drp_ie_alloc(); | 205 | rsv->drp_ie = uwb_drp_ie_alloc(); |
@@ -149,9 +208,11 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv) | |||
149 | } | 208 | } |
150 | drp_ie = rsv->drp_ie; | 209 | drp_ie = rsv->drp_ie; |
151 | 210 | ||
211 | uwb_ie_drp_set_unsafe(drp_ie, unsafe); | ||
212 | uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); | ||
152 | uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); | 213 | uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); |
153 | uwb_ie_drp_set_status(drp_ie, status); | 214 | uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); |
154 | uwb_ie_drp_set_reason_code(drp_ie, reason_code); | 215 | uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); |
155 | uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); | 216 | uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); |
156 | uwb_ie_drp_set_type(drp_ie, rsv->type); | 217 | uwb_ie_drp_set_type(drp_ie, rsv->type); |
157 | 218 | ||
@@ -169,6 +230,27 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv) | |||
169 | 230 | ||
170 | uwb_drp_ie_from_bm(drp_ie, &rsv->mas); | 231 | uwb_drp_ie_from_bm(drp_ie, &rsv->mas); |
171 | 232 | ||
233 | if (uwb_rsv_has_two_drp_ies(rsv)) { | ||
234 | mv = &rsv->mv; | ||
235 | if (mv->companion_drp_ie == NULL) { | ||
236 | mv->companion_drp_ie = uwb_drp_ie_alloc(); | ||
237 | if (mv->companion_drp_ie == NULL) | ||
238 | return -ENOMEM; | ||
239 | } | ||
240 | drp_ie = mv->companion_drp_ie; | ||
241 | |||
242 | /* keep all the same configuration of the main drp_ie */ | ||
243 | memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); | ||
244 | |||
245 | |||
246 | /* FIXME: handle properly the unsafe bit */ | ||
247 | uwb_ie_drp_set_unsafe(drp_ie, 1); | ||
248 | uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); | ||
249 | uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); | ||
250 | |||
251 | uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); | ||
252 | } | ||
253 | |||
172 | rsv->ie_valid = true; | 254 | rsv->ie_valid = true; |
173 | return 0; | 255 | return 0; |
174 | } | 256 | } |
@@ -219,6 +301,8 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) | |||
219 | u8 zone; | 301 | u8 zone; |
220 | u16 zone_mask; | 302 | u16 zone_mask; |
221 | 303 | ||
304 | bitmap_zero(bm->bm, UWB_NUM_MAS); | ||
305 | |||
222 | for (cnt = 0; cnt < numallocs; cnt++) { | 306 | for (cnt = 0; cnt < numallocs; cnt++) { |
223 | alloc = &drp_ie->allocs[cnt]; | 307 | alloc = &drp_ie->allocs[cnt]; |
224 | zone_bm = le16_to_cpu(alloc->zone_bm); | 308 | zone_bm = le16_to_cpu(alloc->zone_bm); |
@@ -230,3 +314,4 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) | |||
230 | } | 314 | } |
231 | } | 315 | } |
232 | } | 316 | } |
317 | |||
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c index c0b1e5e2bd08..2b4f9406789d 100644 --- a/drivers/uwb/drp.c +++ b/drivers/uwb/drp.c | |||
@@ -23,6 +23,59 @@ | |||
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include "uwb-internal.h" | 24 | #include "uwb-internal.h" |
25 | 25 | ||
26 | |||
27 | /* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ | ||
28 | enum uwb_drp_conflict_action { | ||
29 | /* Reservation is maintained, no action needed */ | ||
30 | UWB_DRP_CONFLICT_MANTAIN = 0, | ||
31 | |||
32 | /* the device shall not transmit frames in conflicting MASs in | ||
33 | * the following superframe. If the device is the reservation | ||
34 | * target, it shall also set the Reason Code in its DRP IE to | ||
35 | * Conflict in its beacon in the following superframe. | ||
36 | */ | ||
37 | UWB_DRP_CONFLICT_ACT1, | ||
38 | |||
39 | /* the device shall not set the Reservation Status bit to ONE | ||
40 | * and shall not transmit frames in conflicting MASs. If the | ||
41 | * device is the reservation target, it shall also set the | ||
42 | * Reason Code in its DRP IE to Conflict. | ||
43 | */ | ||
44 | UWB_DRP_CONFLICT_ACT2, | ||
45 | |||
46 | /* the device shall not transmit frames in conflicting MASs in | ||
47 | * the following superframe. It shall remove the conflicting | ||
48 | * MASs from the reservation or set the Reservation Status to | ||
49 | * ZERO in its beacon in the following superframe. If the | ||
50 | * device is the reservation target, it shall also set the | ||
51 | * Reason Code in its DRP IE to Conflict. | ||
52 | */ | ||
53 | UWB_DRP_CONFLICT_ACT3, | ||
54 | }; | ||
55 | |||
56 | |||
57 | static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, | ||
58 | struct uwb_rceb *reply, ssize_t reply_size) | ||
59 | { | ||
60 | struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; | ||
61 | |||
62 | if (r != NULL) { | ||
63 | if (r->bResultCode != UWB_RC_RES_SUCCESS) | ||
64 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", | ||
65 | uwb_rc_strerror(r->bResultCode), r->bResultCode); | ||
66 | } else | ||
67 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); | ||
68 | |||
69 | spin_lock(&rc->rsvs_lock); | ||
70 | if (rc->set_drp_ie_pending > 1) { | ||
71 | rc->set_drp_ie_pending = 0; | ||
72 | uwb_rsv_queue_update(rc); | ||
73 | } else { | ||
74 | rc->set_drp_ie_pending = 0; | ||
75 | } | ||
76 | spin_unlock(&rc->rsvs_lock); | ||
77 | } | ||
78 | |||
26 | /** | 79 | /** |
27 | * Construct and send the SET DRP IE | 80 | * Construct and send the SET DRP IE |
28 | * | 81 | * |
@@ -37,28 +90,32 @@ | |||
37 | * | 90 | * |
38 | * A DRP Availability IE is appended. | 91 | * A DRP Availability IE is appended. |
39 | * | 92 | * |
40 | * rc->uwb_dev.mutex is held | 93 | * rc->rsvs_mutex is held |
41 | * | 94 | * |
42 | * FIXME We currently ignore the returned value indicating the remaining space | 95 | * FIXME We currently ignore the returned value indicating the remaining space |
43 | * in beacon. This could be used to deny reservation requests earlier if | 96 | * in beacon. This could be used to deny reservation requests earlier if |
44 | * determined that they would cause the beacon space to be exceeded. | 97 | * determined that they would cause the beacon space to be exceeded. |
45 | */ | 98 | */ |
46 | static | 99 | int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) |
47 | int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) | ||
48 | { | 100 | { |
49 | int result; | 101 | int result; |
50 | struct device *dev = &rc->uwb_dev.dev; | ||
51 | struct uwb_rc_cmd_set_drp_ie *cmd; | 102 | struct uwb_rc_cmd_set_drp_ie *cmd; |
52 | struct uwb_rc_evt_set_drp_ie reply; | ||
53 | struct uwb_rsv *rsv; | 103 | struct uwb_rsv *rsv; |
104 | struct uwb_rsv_move *mv; | ||
54 | int num_bytes = 0; | 105 | int num_bytes = 0; |
55 | u8 *IEDataptr; | 106 | u8 *IEDataptr; |
56 | 107 | ||
57 | result = -ENOMEM; | 108 | result = -ENOMEM; |
58 | /* First traverse all reservations to determine memory needed. */ | 109 | /* First traverse all reservations to determine memory needed. */ |
59 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | 110 | list_for_each_entry(rsv, &rc->reservations, rc_node) { |
60 | if (rsv->drp_ie != NULL) | 111 | if (rsv->drp_ie != NULL) { |
61 | num_bytes += rsv->drp_ie->hdr.length + 2; | 112 | num_bytes += rsv->drp_ie->hdr.length + 2; |
113 | if (uwb_rsv_has_two_drp_ies(rsv) && | ||
114 | (rsv->mv.companion_drp_ie != NULL)) { | ||
115 | mv = &rsv->mv; | ||
116 | num_bytes += mv->companion_drp_ie->hdr.length + 2; | ||
117 | } | ||
118 | } | ||
62 | } | 119 | } |
63 | num_bytes += sizeof(rc->drp_avail.ie); | 120 | num_bytes += sizeof(rc->drp_avail.ie); |
64 | cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); | 121 | cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); |
@@ -69,128 +126,322 @@ int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) | |||
69 | cmd->wIELength = num_bytes; | 126 | cmd->wIELength = num_bytes; |
70 | IEDataptr = (u8 *)&cmd->IEData[0]; | 127 | IEDataptr = (u8 *)&cmd->IEData[0]; |
71 | 128 | ||
129 | /* FIXME: DRP avail IE is not always needed */ | ||
130 | /* put DRP avail IE first */ | ||
131 | memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); | ||
132 | IEDataptr += sizeof(struct uwb_ie_drp_avail); | ||
133 | |||
72 | /* Next traverse all reservations to place IEs in allocated memory. */ | 134 | /* Next traverse all reservations to place IEs in allocated memory. */ |
73 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | 135 | list_for_each_entry(rsv, &rc->reservations, rc_node) { |
74 | if (rsv->drp_ie != NULL) { | 136 | if (rsv->drp_ie != NULL) { |
75 | memcpy(IEDataptr, rsv->drp_ie, | 137 | memcpy(IEDataptr, rsv->drp_ie, |
76 | rsv->drp_ie->hdr.length + 2); | 138 | rsv->drp_ie->hdr.length + 2); |
77 | IEDataptr += rsv->drp_ie->hdr.length + 2; | 139 | IEDataptr += rsv->drp_ie->hdr.length + 2; |
140 | |||
141 | if (uwb_rsv_has_two_drp_ies(rsv) && | ||
142 | (rsv->mv.companion_drp_ie != NULL)) { | ||
143 | mv = &rsv->mv; | ||
144 | memcpy(IEDataptr, mv->companion_drp_ie, | ||
145 | mv->companion_drp_ie->hdr.length + 2); | ||
146 | IEDataptr += mv->companion_drp_ie->hdr.length + 2; | ||
147 | } | ||
78 | } | 148 | } |
79 | } | 149 | } |
80 | memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); | ||
81 | 150 | ||
82 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | 151 | result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes, |
83 | reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; | 152 | UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, |
84 | result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, | 153 | uwb_rc_set_drp_cmd_done, NULL); |
85 | sizeof(*cmd) + num_bytes, &reply.rceb, | 154 | |
86 | sizeof(reply)); | 155 | rc->set_drp_ie_pending = 1; |
87 | if (result < 0) | 156 | |
88 | goto error_cmd; | ||
89 | result = le16_to_cpu(reply.wRemainingSpace); | ||
90 | if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
91 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " | ||
92 | "failed: %s (%d). RemainingSpace in beacon " | ||
93 | "= %d\n", uwb_rc_strerror(reply.bResultCode), | ||
94 | reply.bResultCode, result); | ||
95 | result = -EIO; | ||
96 | } else { | ||
97 | dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " | ||
98 | "= %d.\n", result); | ||
99 | result = 0; | ||
100 | } | ||
101 | error_cmd: | ||
102 | kfree(cmd); | 157 | kfree(cmd); |
103 | error: | 158 | error: |
104 | return result; | 159 | return result; |
105 | |||
106 | } | 160 | } |
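For reference, the command buffer that the rewritten uwb_rc_send_all_drp_ie() queues with SET-DRP-IE is laid out as sketched below (illustrative, with two hypothetical reservations of which only the second is moving and therefore carries a companion DRP IE):

    cmd->IEData:
        [ DRP Availability IE ]
        [ rsv A DRP IE ]
        [ rsv B DRP IE ][ rsv B companion DRP IE ]
    cmd->wIELength = sizeof(DRP Availability IE)
                   + sum over DRP IEs of (hdr.length + 2)

i.e. the DRP Availability IE now always comes first instead of last, and a companion IE is emitted immediately after its owning reservation's main DRP IE.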
107 | /** | 161 | |
108 | * Send all DRP IEs associated with this host | 162 | /* |
109 | * | 163 | * Evaluate the action to perform using conflict resolution rules |
110 | * @returns: >= 0 number of bytes still available in the beacon | ||
111 | * < 0 errno code on error. | ||
112 | * | 164 | * |
113 | * As per the protocol we obtain the host controller device lock to access | 165 | * Return a uwb_drp_conflict_action. |
114 | * bandwidth structures. | ||
115 | */ | 166 | */ |
116 | int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) | 167 | static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, |
168 | struct uwb_rsv *rsv, int our_status) | ||
117 | { | 169 | { |
118 | int result; | 170 | int our_tie_breaker = rsv->tiebreaker; |
171 | int our_type = rsv->type; | ||
172 | int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; | ||
173 | |||
174 | int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); | ||
175 | int ext_status = uwb_ie_drp_status(ext_drp_ie); | ||
176 | int ext_type = uwb_ie_drp_type(ext_drp_ie); | ||
177 | |||
178 | |||
179 | /* [ECMA-368 2nd Edition] 17.4.6 */ | ||
180 | if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { | ||
181 | return UWB_DRP_CONFLICT_MANTAIN; | ||
182 | } | ||
119 | 183 | ||
120 | mutex_lock(&rc->uwb_dev.mutex); | 184 | /* [ECMA-368 2nd Edition] 17.4.6-1 */ |
121 | result = uwb_rc_gen_send_drp_ie(rc); | 185 | if (our_type == UWB_DRP_TYPE_ALIEN_BP) { |
122 | mutex_unlock(&rc->uwb_dev.mutex); | 186 | return UWB_DRP_CONFLICT_MANTAIN; |
123 | return result; | 187 | } |
188 | |||
189 | /* [ECMA-368 2nd Edition] 17.4.6-2 */ | ||
190 | if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { | ||
191 | /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ | ||
192 | return UWB_DRP_CONFLICT_ACT1; | ||
193 | } | ||
194 | |||
195 | /* [ECMA-368 2nd Edition] 17.4.6-3 */ | ||
196 | if (our_status == 0 && ext_status == 1) { | ||
197 | return UWB_DRP_CONFLICT_ACT2; | ||
198 | } | ||
199 | |||
200 | /* [ECMA-368 2nd Edition] 17.4.6-4 */ | ||
201 | if (our_status == 1 && ext_status == 0) { | ||
202 | return UWB_DRP_CONFLICT_MANTAIN; | ||
203 | } | ||
204 | |||
205 | /* [ECMA-368 2nd Edition] 17.4.6-5a */ | ||
206 | if (our_tie_breaker == ext_tie_breaker && | ||
207 | our_beacon_slot < ext_beacon_slot) { | ||
208 | return UWB_DRP_CONFLICT_MANTAIN; | ||
209 | } | ||
210 | |||
211 | /* [ECMA-368 2nd Edition] 17.4.6-5b */ | ||
212 | if (our_tie_breaker != ext_tie_breaker && | ||
213 | our_beacon_slot > ext_beacon_slot) { | ||
214 | return UWB_DRP_CONFLICT_MANTAIN; | ||
215 | } | ||
216 | |||
217 | if (our_status == 0) { | ||
218 | if (our_tie_breaker == ext_tie_breaker) { | ||
219 | /* [ECMA-368 2nd Edition] 17.4.6-6a */ | ||
220 | if (our_beacon_slot > ext_beacon_slot) { | ||
221 | return UWB_DRP_CONFLICT_ACT2; | ||
222 | } | ||
223 | } else { | ||
224 | /* [ECMA-368 2nd Edition] 17.4.6-6b */ | ||
225 | if (our_beacon_slot < ext_beacon_slot) { | ||
226 | return UWB_DRP_CONFLICT_ACT2; | ||
227 | } | ||
228 | } | ||
229 | } else { | ||
230 | if (our_tie_breaker == ext_tie_breaker) { | ||
231 | /* [ECMA-368 2nd Edition] 17.4.6-7a */ | ||
232 | if (our_beacon_slot > ext_beacon_slot) { | ||
233 | return UWB_DRP_CONFLICT_ACT3; | ||
234 | } | ||
235 | } else { | ||
236 | /* [ECMA-368 2nd Edition] 17.4.6-7b */ | ||
237 | if (our_beacon_slot < ext_beacon_slot) { | ||
238 | return UWB_DRP_CONFLICT_ACT3; | ||
239 | } | ||
240 | } | ||
241 | } | ||
242 | return UWB_DRP_CONFLICT_MANTAIN; | ||
124 | } | 243 | } |
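The numbered branches in evaluate_conflict_action() map one-to-one onto [ECMA-368 2nd Edition] 17.4.6. A standalone mirror of just the tie-breaking rules (17.4.6-5 to 17.4.6-7), with a worked scenario, may make the ordering easier to follow (illustrative only; the helper and its return encoding are invented here, not part of the patch):

/* Applies once both reservations are known to be "hard" (neither PCA nor
 * alien BP) and rules 17.4.6-3/-4 did not already decide the outcome. */
enum { MANTAIN = 0, ACT2 = 2, ACT3 = 3 };

static int tie_break(int our_status, int our_tie, int our_slot,
                     int ext_tie, int ext_slot)
{
        if (our_tie == ext_tie && our_slot < ext_slot)
                return MANTAIN;                 /* 17.4.6-5a */
        if (our_tie != ext_tie && our_slot > ext_slot)
                return MANTAIN;                 /* 17.4.6-5b */
        if (our_status == 0) {
                if (our_tie == ext_tie && our_slot > ext_slot)
                        return ACT2;            /* 17.4.6-6a */
                if (our_tie != ext_tie && our_slot < ext_slot)
                        return ACT2;            /* 17.4.6-6b */
        } else {
                if (our_tie == ext_tie && our_slot > ext_slot)
                        return ACT3;            /* 17.4.6-7a */
                if (our_tie != ext_tie && our_slot < ext_slot)
                        return ACT3;            /* 17.4.6-7b */
        }
        return MANTAIN;
}

/* Worked example: both reservations established (status 1), equal
 * tie-breakers, our beacon slot 9, external beacon slot 5.  Rules 5a/5b
 * do not apply, so 7a fires: tie_break(1, 0, 9, 0, 5) == ACT3, i.e. the
 * device must drop the conflicting MAS or clear its Reservation Status. */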
125 | 244 | ||
126 | void uwb_drp_handle_timeout(struct uwb_rsv *rsv) | 245 | static void handle_conflict_normal(struct uwb_ie_drp *drp_ie, |
246 | int ext_beacon_slot, | ||
247 | struct uwb_rsv *rsv, | ||
248 | struct uwb_mas_bm *conflicting_mas) | ||
127 | { | 249 | { |
128 | struct device *dev = &rsv->rc->uwb_dev.dev; | 250 | struct uwb_rc *rc = rsv->rc; |
251 | struct uwb_rsv_move *mv = &rsv->mv; | ||
252 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
253 | int action; | ||
254 | |||
255 | action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); | ||
256 | |||
257 | if (uwb_rsv_is_owner(rsv)) { | ||
258 | switch(action) { | ||
259 | case UWB_DRP_CONFLICT_ACT2: | ||
260 | /* try move */ | ||
261 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); | ||
262 | if (bow->can_reserve_extra_mases == false) | ||
263 | uwb_rsv_backoff_win_increment(rc); | ||
264 | |||
265 | break; | ||
266 | case UWB_DRP_CONFLICT_ACT3: | ||
267 | uwb_rsv_backoff_win_increment(rc); | ||
268 | /* drop some mases with reason modified */ | ||
269 | /* put in the companion the mases to be dropped */ | ||
270 | bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); | ||
271 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
272 | default: | ||
273 | break; | ||
274 | } | ||
275 | } else { | ||
276 | switch(action) { | ||
277 | case UWB_DRP_CONFLICT_ACT2: | ||
278 | case UWB_DRP_CONFLICT_ACT3: | ||
279 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
280 | default: | ||
281 | break; | ||
282 | } | ||
129 | 283 | ||
130 | dev_dbg(dev, "reservation timeout in state %s (%d)\n", | 284 | } |
131 | uwb_rsv_state_str(rsv->state), rsv->state); | 285 | |
286 | } | ||
132 | 287 | ||
133 | switch (rsv->state) { | 288 | static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, |
134 | case UWB_RSV_STATE_O_INITIATED: | 289 | struct uwb_rsv *rsv, bool companion_only, |
135 | if (rsv->is_multicast) { | 290 | struct uwb_mas_bm *conflicting_mas) |
136 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 291 | { |
137 | return; | 292 | struct uwb_rc *rc = rsv->rc; |
293 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
294 | struct uwb_rsv_move *mv = &rsv->mv; | ||
295 | int action; | ||
296 | |||
297 | if (companion_only) { | ||
298 | /* status of companion is 0 at this point */ | ||
299 | action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); | ||
300 | if (uwb_rsv_is_owner(rsv)) { | ||
301 | switch(action) { | ||
302 | case UWB_DRP_CONFLICT_ACT2: | ||
303 | case UWB_DRP_CONFLICT_ACT3: | ||
304 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
305 | rsv->needs_release_companion_mas = false; | ||
306 | if (bow->can_reserve_extra_mases == false) | ||
307 | uwb_rsv_backoff_win_increment(rc); | ||
308 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
309 | } | ||
310 | } else { /* rsv is target */ | ||
311 | switch(action) { | ||
312 | case UWB_DRP_CONFLICT_ACT2: | ||
313 | case UWB_DRP_CONFLICT_ACT3: | ||
314 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT); | ||
315 | /* send_drp_avail_ie = true; */ | ||
316 | } | ||
138 | } | 317 | } |
139 | break; | 318 | } else { /* the base part of the reservation is also conflicting */ |
140 | case UWB_RSV_STATE_O_ESTABLISHED: | 319 | if (uwb_rsv_is_owner(rsv)) { |
141 | if (rsv->is_multicast) | 320 | uwb_rsv_backoff_win_increment(rc); |
142 | return; | 321 | /* remove companion part */ |
143 | break; | 322 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); |
144 | default: | 323 | |
145 | break; | 324 | /* drop some mases with reason modified */ |
325 | |||
326 | /* put in the companion the mases to be dropped */ | ||
327 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); | ||
328 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
329 | } else { /* it is a target rsv */ | ||
330 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
331 | /* send_drp_avail_ie = true; */ | ||
332 | } | ||
333 | } | ||
334 | } | ||
335 | |||
336 | static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, | ||
337 | struct uwb_rc_evt_drp *drp_evt, | ||
338 | struct uwb_ie_drp *drp_ie, | ||
339 | struct uwb_mas_bm *conflicting_mas) | ||
340 | { | ||
341 | struct uwb_rsv_move *mv; | ||
342 | |||
343 | /* check if the conflicting reservation has two drp_ies */ | ||
344 | if (uwb_rsv_has_two_drp_ies(rsv)) { | ||
345 | mv = &rsv->mv; | ||
346 | if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
347 | handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, | ||
348 | rsv, false, conflicting_mas); | ||
349 | } else { | ||
350 | if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
351 | handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, | ||
352 | rsv, true, conflicting_mas); | ||
353 | } | ||
354 | } | ||
355 | } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
356 | handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas); | ||
146 | } | 357 | } |
147 | uwb_rsv_remove(rsv); | ||
148 | } | 358 | } |
149 | 359 | ||
360 | static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, | ||
361 | struct uwb_rc_evt_drp *drp_evt, | ||
362 | struct uwb_ie_drp *drp_ie, | ||
363 | struct uwb_mas_bm *conflicting_mas) | ||
364 | { | ||
365 | struct uwb_rsv *rsv; | ||
366 | |||
367 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
368 | uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas); | ||
369 | } | ||
370 | } | ||
371 | |||
150 | /* | 372 | /* |
151 | * Based on the DRP IE, transition a target reservation to a new | 373 | * Based on the DRP IE, transition a target reservation to a new |
152 | * state. | 374 | * state. |
153 | */ | 375 | */ |
154 | static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, | 376 | static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, |
155 | struct uwb_ie_drp *drp_ie) | 377 | struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) |
156 | { | 378 | { |
157 | struct device *dev = &rc->uwb_dev.dev; | 379 | struct device *dev = &rc->uwb_dev.dev; |
380 | struct uwb_rsv_move *mv = &rsv->mv; | ||
158 | int status; | 381 | int status; |
159 | enum uwb_drp_reason reason_code; | 382 | enum uwb_drp_reason reason_code; |
160 | 383 | struct uwb_mas_bm mas; | |
384 | |||
161 | status = uwb_ie_drp_status(drp_ie); | 385 | status = uwb_ie_drp_status(drp_ie); |
162 | reason_code = uwb_ie_drp_reason_code(drp_ie); | 386 | reason_code = uwb_ie_drp_reason_code(drp_ie); |
387 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
163 | 388 | ||
164 | if (status) { | 389 | switch (reason_code) { |
165 | switch (reason_code) { | 390 | case UWB_DRP_REASON_ACCEPTED: |
166 | case UWB_DRP_REASON_ACCEPTED: | 391 | |
167 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | 392 | if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { |
168 | break; | 393 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); |
169 | case UWB_DRP_REASON_MODIFIED: | ||
170 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | ||
171 | reason_code, status); | ||
172 | break; | 394 | break; |
173 | default: | ||
174 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
175 | reason_code, status); | ||
176 | } | 395 | } |
177 | } else { | 396 | |
178 | switch (reason_code) { | 397 | if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { |
179 | case UWB_DRP_REASON_ACCEPTED: | 398 | /* drp_ie is companion */ |
180 | /* New reservations are handled in uwb_rsv_find(). */ | 399 | if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) |
181 | break; | 400 | /* stroke companion */ |
182 | case UWB_DRP_REASON_DENIED: | 401 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); |
183 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 402 | } else { |
184 | break; | 403 | if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { |
185 | case UWB_DRP_REASON_CONFLICT: | 404 | if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) { |
186 | case UWB_DRP_REASON_MODIFIED: | 405 | /* FIXME: there is a conflict, find |
187 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 406 | * the conflicting reservations and |
188 | reason_code, status); | 407 | * take a sensible action. Consider |
408 | * that in drp_ie there is the | ||
409 | * "neighbour" */ | ||
410 | uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); | ||
411 | } else { | ||
412 | /* accept the extra reservation */ | ||
413 | bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS); | ||
414 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); | ||
415 | } | ||
416 | } else { | ||
417 | if (status) { | ||
418 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | } | ||
423 | break; | ||
424 | |||
425 | case UWB_DRP_REASON_MODIFIED: | ||
426 | /* check to see if we have already modified the reservation */ | ||
427 | if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { | ||
428 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
189 | break; | 429 | break; |
190 | default: | ||
191 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
192 | reason_code, status); | ||
193 | } | 430 | } |
431 | |||
432 | /* find if the owner wants to expand or reduce */ | ||
433 | if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
434 | /* owner is reducing */ | ||
435 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS); | ||
436 | uwb_drp_avail_release(rsv->rc, &mv->companion_mas); | ||
437 | } | ||
438 | |||
439 | bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); | ||
440 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); | ||
441 | break; | ||
442 | default: | ||
443 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
444 | reason_code, status); | ||
194 | } | 445 | } |
195 | } | 446 | } |
196 | 447 | ||
@@ -199,23 +450,60 @@ static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
199 | * state. | 450 | * state. |
200 | */ | 451 | */ |
201 | static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | 452 | static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, |
202 | struct uwb_ie_drp *drp_ie) | 453 | struct uwb_dev *src, struct uwb_ie_drp *drp_ie, |
454 | struct uwb_rc_evt_drp *drp_evt) | ||
203 | { | 455 | { |
204 | struct device *dev = &rc->uwb_dev.dev; | 456 | struct device *dev = &rc->uwb_dev.dev; |
457 | struct uwb_rsv_move *mv = &rsv->mv; | ||
205 | int status; | 458 | int status; |
206 | enum uwb_drp_reason reason_code; | 459 | enum uwb_drp_reason reason_code; |
460 | struct uwb_mas_bm mas; | ||
207 | 461 | ||
208 | status = uwb_ie_drp_status(drp_ie); | 462 | status = uwb_ie_drp_status(drp_ie); |
209 | reason_code = uwb_ie_drp_reason_code(drp_ie); | 463 | reason_code = uwb_ie_drp_reason_code(drp_ie); |
464 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
210 | 465 | ||
211 | if (status) { | 466 | if (status) { |
212 | switch (reason_code) { | 467 | switch (reason_code) { |
213 | case UWB_DRP_REASON_ACCEPTED: | 468 | case UWB_DRP_REASON_ACCEPTED: |
214 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 469 | switch (rsv->state) { |
215 | break; | 470 | case UWB_RSV_STATE_O_PENDING: |
216 | case UWB_DRP_REASON_MODIFIED: | 471 | case UWB_RSV_STATE_O_INITIATED: |
217 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 472 | case UWB_RSV_STATE_O_ESTABLISHED: |
218 | reason_code, status); | 473 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); |
474 | break; | ||
475 | case UWB_RSV_STATE_O_MODIFIED: | ||
476 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
477 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
478 | } else { | ||
479 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
480 | } | ||
481 | break; | ||
482 | |||
483 | case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */ | ||
484 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
485 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
486 | } else { | ||
487 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
488 | } | ||
489 | break; | ||
490 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
491 | if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) { | ||
492 | /* Companion reservation accepted */ | ||
493 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
494 | } else { | ||
495 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
496 | } | ||
497 | break; | ||
498 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
499 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) | ||
500 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
501 | else | ||
502 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
503 | break; | ||
504 | default: | ||
505 | break; | ||
506 | } | ||
219 | break; | 507 | break; |
220 | default: | 508 | default: |
221 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | 509 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", |
@@ -230,9 +518,10 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
230 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 518 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
231 | break; | 519 | break; |
232 | case UWB_DRP_REASON_CONFLICT: | 520 | case UWB_DRP_REASON_CONFLICT: |
233 | case UWB_DRP_REASON_MODIFIED: | 521 | /* resolve the conflict */ |
234 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 522 | bitmap_complement(mas.bm, src->last_availability_bm, |
235 | reason_code, status); | 523 | UWB_NUM_MAS); |
524 | uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); | ||
236 | break; | 525 | break; |
237 | default: | 526 | default: |
238 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | 527 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", |
@@ -241,12 +530,110 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
241 | } | 530 | } |
242 | } | 531 | } |
243 | 532 | ||
533 | static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) | ||
534 | { | ||
535 | unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; | ||
536 | mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); | ||
537 | } | ||
538 | |||
539 | static void uwb_cnflt_update_work(struct work_struct *work) | ||
540 | { | ||
541 | struct uwb_cnflt_alien *cnflt = container_of(work, | ||
542 | struct uwb_cnflt_alien, | ||
543 | cnflt_update_work); | ||
544 | struct uwb_cnflt_alien *c; | ||
545 | struct uwb_rc *rc = cnflt->rc; | ||
546 | |||
547 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
548 | |||
549 | mutex_lock(&rc->rsvs_mutex); | ||
550 | |||
551 | list_del(&cnflt->rc_node); | ||
552 | |||
553 | /* update rc global conflicting alien bitmap */ | ||
554 | bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); | ||
555 | |||
556 | list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { | ||
557 | bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS); | ||
558 | } | ||
559 | |||
560 | queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); | ||
561 | |||
562 | kfree(cnflt); | ||
563 | mutex_unlock(&rc->rsvs_mutex); | ||
564 | } | ||
565 | |||
566 | static void uwb_cnflt_timer(unsigned long arg) | ||
567 | { | ||
568 | struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg; | ||
569 | |||
570 | queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); | ||
571 | } | ||
572 | |||
244 | /* | 573 | /* |
245 | * Process a received DRP IE, it's either for a reservation owned by | 574 | * We have received a DRP_IE of type Alien BP and we need to make |
246 | * the RC or targeted at it (or it's for a WUSB cluster reservation). | 575 | * sure we do not transmit in conflicting MASs. |
247 | */ | 576 | */ |
248 | static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | 577 | static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) |
249 | struct uwb_ie_drp *drp_ie) | 578 | { |
579 | struct device *dev = &rc->uwb_dev.dev; | ||
580 | struct uwb_mas_bm mas; | ||
581 | struct uwb_cnflt_alien *cnflt; | ||
582 | char buf[72]; | ||
583 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
584 | |||
585 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
586 | bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); | ||
587 | |||
588 | list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { | ||
589 | if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { | ||
590 | /* Existing alien BP reservation conflicting | ||
591 | * bitmap, just reset the timer */ | ||
592 | uwb_cnflt_alien_stroke_timer(cnflt); | ||
593 | return; | ||
594 | } | ||
595 | } | ||
596 | |||
597 | /* New alien BP reservation conflicting bitmap */ | ||
598 | |||
599 | /* alloc and initialize new uwb_cnflt_alien */ | ||
600 | cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); | ||
601 | if (!cnflt) | ||
602 | dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); | ||
603 | INIT_LIST_HEAD(&cnflt->rc_node); | ||
604 | init_timer(&cnflt->timer); | ||
605 | cnflt->timer.function = uwb_cnflt_timer; | ||
606 | cnflt->timer.data = (unsigned long)cnflt; | ||
607 | |||
608 | cnflt->rc = rc; | ||
609 | INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); | ||
610 | |||
611 | bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); | ||
612 | |||
613 | list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); | ||
614 | |||
615 | /* update rc global conflicting alien bitmap */ | ||
616 | bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); | ||
617 | |||
618 | queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); | ||
619 | |||
620 | /* start the timer */ | ||
621 | uwb_cnflt_alien_stroke_timer(cnflt); | ||
622 | } | ||
623 | |||
624 | static void uwb_drp_process_not_involved(struct uwb_rc *rc, | ||
625 | struct uwb_rc_evt_drp *drp_evt, | ||
626 | struct uwb_ie_drp *drp_ie) | ||
627 | { | ||
628 | struct uwb_mas_bm mas; | ||
629 | |||
630 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
631 | uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); | ||
632 | } | ||
633 | |||
634 | static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, | ||
635 | struct uwb_rc_evt_drp *drp_evt, | ||
636 | struct uwb_ie_drp *drp_ie) | ||
250 | { | 637 | { |
251 | struct uwb_rsv *rsv; | 638 | struct uwb_rsv *rsv; |
252 | 639 | ||
@@ -259,7 +646,7 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | |||
259 | */ | 646 | */ |
260 | return; | 647 | return; |
261 | } | 648 | } |
262 | 649 | ||
263 | /* | 650 | /* |
264 | * Do nothing with DRP IEs for reservations that have been | 651 | * Do nothing with DRP IEs for reservations that have been |
265 | * terminated. | 652 | * terminated. |
@@ -268,13 +655,43 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | |||
268 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 655 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
269 | return; | 656 | return; |
270 | } | 657 | } |
271 | 658 | ||
272 | if (uwb_ie_drp_owner(drp_ie)) | 659 | if (uwb_ie_drp_owner(drp_ie)) |
273 | uwb_drp_process_target(rc, rsv, drp_ie); | 660 | uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); |
661 | else | ||
662 | uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); | ||
663 | |||
664 | } | ||
665 | |||
666 | |||
667 | static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) | ||
668 | { | ||
669 | return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * Process a received DRP IE. | ||
674 | */ | ||
675 | static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
676 | struct uwb_dev *src, struct uwb_ie_drp *drp_ie) | ||
677 | { | ||
678 | if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) | ||
679 | uwb_drp_handle_alien_drp(rc, drp_ie); | ||
680 | else if (uwb_drp_involves_us(rc, drp_ie)) | ||
681 | uwb_drp_process_involved(rc, src, drp_evt, drp_ie); | ||
274 | else | 682 | else |
275 | uwb_drp_process_owner(rc, rsv, drp_ie); | 683 | uwb_drp_process_not_involved(rc, drp_evt, drp_ie); |
276 | } | 684 | } |
277 | 685 | ||
686 | /* | ||
687 | * Process a received DRP Availability IE | ||
688 | */ | ||
689 | static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, | ||
690 | struct uwb_ie_drp_avail *drp_availability_ie) | ||
691 | { | ||
692 | bitmap_copy(src->last_availability_bm, | ||
693 | drp_availability_ie->bmp, UWB_NUM_MAS); | ||
694 | } | ||
278 | 695 | ||
279 | /* | 696 | /* |
280 | * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) | 697 | * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) |
@@ -296,10 +713,10 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | |||
296 | 713 | ||
297 | switch (ie_hdr->element_id) { | 714 | switch (ie_hdr->element_id) { |
298 | case UWB_IE_DRP_AVAILABILITY: | 715 | case UWB_IE_DRP_AVAILABILITY: |
299 | /* FIXME: does something need to be done with this? */ | 716 | uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); |
300 | break; | 717 | break; |
301 | case UWB_IE_DRP: | 718 | case UWB_IE_DRP: |
302 | uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); | 719 | uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); |
303 | break; | 720 | break; |
304 | default: | 721 | default: |
305 | dev_warn(dev, "unexpected IE in DRP notification\n"); | 722 | dev_warn(dev, "unexpected IE in DRP notification\n"); |
@@ -312,55 +729,6 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | |||
312 | (int)ielen); | 729 | (int)ielen); |
313 | } | 730 | } |
314 | 731 | ||
315 | |||
316 | /* | ||
317 | * Go through all the DRP IEs and find the ones that conflict with our | ||
318 | * reservations. | ||
319 | * | ||
320 | * FIXME: must resolve the conflict according to the rules in | ||
321 | * [ECMA-368]. | ||
322 | */ | ||
323 | static | ||
324 | void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
325 | size_t ielen, struct uwb_dev *src_dev) | ||
326 | { | ||
327 | struct device *dev = &rc->uwb_dev.dev; | ||
328 | struct uwb_ie_hdr *ie_hdr; | ||
329 | struct uwb_ie_drp *drp_ie; | ||
330 | void *ptr; | ||
331 | |||
332 | ptr = drp_evt->ie_data; | ||
333 | for (;;) { | ||
334 | ie_hdr = uwb_ie_next(&ptr, &ielen); | ||
335 | if (!ie_hdr) | ||
336 | break; | ||
337 | |||
338 | drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); | ||
339 | |||
340 | /* FIXME: check if this DRP IE conflicts. */ | ||
341 | } | ||
342 | |||
343 | if (ielen > 0) | ||
344 | dev_warn(dev, "%d octets remaining in DRP notification\n", | ||
345 | (int)ielen); | ||
346 | } | ||
347 | |||
348 | |||
349 | /* | ||
350 | * Terminate all reservations owned by, or targeted at, 'uwb_dev'. | ||
351 | */ | ||
352 | static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) | ||
353 | { | ||
354 | struct uwb_rsv *rsv; | ||
355 | |||
356 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
357 | if (rsv->owner == uwb_dev | ||
358 | || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) | ||
359 | uwb_rsv_remove(rsv); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | |||
364 | /** | 732 | /** |
365 | * uwbd_evt_handle_rc_drp - handle a DRP_IE event | 733 | * uwbd_evt_handle_rc_drp - handle a DRP_IE event |
366 | * @evt: the DRP_IE event from the radio controller | 734 | * @evt: the DRP_IE event from the radio controller |
@@ -401,7 +769,6 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt) | |||
401 | size_t ielength, bytes_left; | 769 | size_t ielength, bytes_left; |
402 | struct uwb_dev_addr src_addr; | 770 | struct uwb_dev_addr src_addr; |
403 | struct uwb_dev *src_dev; | 771 | struct uwb_dev *src_dev; |
404 | int reason; | ||
405 | 772 | ||
406 | /* Is there enough data to decode the event (and any IEs in | 773 | /* Is there enough data to decode the event (and any IEs in |
407 | its payload)? */ | 774 | its payload)? */ |
@@ -437,22 +804,8 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt) | |||
437 | 804 | ||
438 | mutex_lock(&rc->rsvs_mutex); | 805 | mutex_lock(&rc->rsvs_mutex); |
439 | 806 | ||
440 | reason = uwb_rc_evt_drp_reason(drp_evt); | 807 | /* We do not distinguish based on the reason code */ |
441 | 808 | uwb_drp_process_all(rc, drp_evt, ielength, src_dev); | |
442 | switch (reason) { | ||
443 | case UWB_DRP_NOTIF_DRP_IE_RCVD: | ||
444 | uwb_drp_process_all(rc, drp_evt, ielength, src_dev); | ||
445 | break; | ||
446 | case UWB_DRP_NOTIF_CONFLICT: | ||
447 | uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); | ||
448 | break; | ||
449 | case UWB_DRP_NOTIF_TERMINATE: | ||
450 | uwb_drp_terminate_all(rc, src_dev); | ||
451 | break; | ||
452 | default: | ||
453 | dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); | ||
454 | break; | ||
455 | } | ||
456 | 809 | ||
457 | mutex_unlock(&rc->rsvs_mutex); | 810 | mutex_unlock(&rc->rsvs_mutex); |
458 | 811 | ||
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c index 5fe566b7c845..328fcc2b6099 100644 --- a/drivers/uwb/est.c +++ b/drivers/uwb/est.c | |||
@@ -40,10 +40,8 @@ | |||
40 | * uwb_est_get_size() | 40 | * uwb_est_get_size() |
41 | */ | 41 | */ |
42 | #include <linux/spinlock.h> | 42 | #include <linux/spinlock.h> |
43 | #define D_LOCAL 0 | ||
44 | #include <linux/uwb/debug.h> | ||
45 | #include "uwb-internal.h" | ||
46 | 43 | ||
44 | #include "uwb-internal.h" | ||
47 | 45 | ||
48 | struct uwb_est { | 46 | struct uwb_est { |
49 | u16 type_event_high; | 47 | u16 type_event_high; |
@@ -52,7 +50,6 @@ struct uwb_est { | |||
52 | const struct uwb_est_entry *entry; | 50 | const struct uwb_est_entry *entry; |
53 | }; | 51 | }; |
54 | 52 | ||
55 | |||
56 | static struct uwb_est *uwb_est; | 53 | static struct uwb_est *uwb_est; |
57 | static u8 uwb_est_size; | 54 | static u8 uwb_est_size; |
58 | static u8 uwb_est_used; | 55 | static u8 uwb_est_used; |
@@ -440,21 +437,12 @@ ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, | |||
440 | u8 *ptr = (u8 *) rceb; | 437 | u8 *ptr = (u8 *) rceb; |
441 | 438 | ||
442 | read_lock_irqsave(&uwb_est_lock, flags); | 439 | read_lock_irqsave(&uwb_est_lock, flags); |
443 | d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x," | ||
444 | " buffer size %ld\n", | ||
445 | (unsigned) rceb->bEventType, | ||
446 | (unsigned) le16_to_cpu(rceb->wEvent), | ||
447 | (unsigned) rceb->bEventContext, | ||
448 | (long) rceb_size); | ||
449 | size = -ENOSPC; | 440 | size = -ENOSPC; |
450 | if (rceb_size < sizeof(*rceb)) | 441 | if (rceb_size < sizeof(*rceb)) |
451 | goto out; | 442 | goto out; |
452 | event = le16_to_cpu(rceb->wEvent); | 443 | event = le16_to_cpu(rceb->wEvent); |
453 | type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; | 444 | type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; |
454 | for (itr = 0; itr < uwb_est_used; itr++) { | 445 | for (itr = 0; itr < uwb_est_used; itr++) { |
455 | d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n", | ||
456 | uwb_est[itr].type_event_high, uwb_est[itr].vendor, | ||
457 | uwb_est[itr].product); | ||
458 | if (uwb_est[itr].type_event_high != type_event_high) | 446 | if (uwb_est[itr].type_event_high != type_event_high) |
459 | continue; | 447 | continue; |
460 | size = uwb_est_get_size(rc, &uwb_est[itr], | 448 | size = uwb_est_get_size(rc, &uwb_est[itr], |
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 3d26fa0f8ae1..559f8784acf3 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c | |||
@@ -51,16 +51,14 @@ | |||
51 | * | 51 | * |
52 | * | 52 | * |
53 | */ | 53 | */ |
54 | #include <linux/version.h> | ||
55 | #include <linux/init.h> | 54 | #include <linux/init.h> |
56 | #include <linux/module.h> | 55 | #include <linux/module.h> |
57 | #include <linux/usb.h> | 56 | #include <linux/usb.h> |
58 | #include <linux/usb/wusb.h> | 57 | #include <linux/usb/wusb.h> |
59 | #include <linux/usb/wusb-wa.h> | 58 | #include <linux/usb/wusb-wa.h> |
60 | #include <linux/uwb.h> | 59 | #include <linux/uwb.h> |
60 | |||
61 | #include "uwb-internal.h" | 61 | #include "uwb-internal.h" |
62 | #define D_LOCAL 1 | ||
63 | #include <linux/uwb/debug.h> | ||
64 | 62 | ||
65 | /* The device uses commands and events from the WHCI specification, although | 63 | /* The device uses commands and events from the WHCI specification, although |
66 | * reporting itself as WUSB compliant. */ | 64 | * reporting itself as WUSB compliant. */ |
@@ -631,17 +629,13 @@ void hwarc_neep_cb(struct urb *urb) | |||
631 | 629 | ||
632 | switch (result = urb->status) { | 630 | switch (result = urb->status) { |
633 | case 0: | 631 | case 0: |
634 | d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n", | ||
635 | urb->status, (size_t)urb->actual_length); | ||
636 | uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, | 632 | uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, |
637 | urb->actual_length); | 633 | urb->actual_length); |
638 | break; | 634 | break; |
639 | case -ECONNRESET: /* Not an error, but a controlled situation; */ | 635 | case -ECONNRESET: /* Not an error, but a controlled situation; */ |
640 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ | 636 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ |
641 | d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status); | ||
642 | goto out; | 637 | goto out; |
643 | case -ESHUTDOWN: /* going away! */ | 638 | case -ESHUTDOWN: /* going away! */ |
644 | d_printf(2, dev, "NEEP: URB down %d\n", urb->status); | ||
645 | goto out; | 639 | goto out; |
646 | default: /* On general errors, retry unless it gets ugly */ | 640 | default: /* On general errors, retry unless it gets ugly */ |
647 | if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, | 641 | if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, |
@@ -650,7 +644,6 @@ void hwarc_neep_cb(struct urb *urb) | |||
650 | dev_err(dev, "NEEP: URB error %d\n", urb->status); | 644 | dev_err(dev, "NEEP: URB error %d\n", urb->status); |
651 | } | 645 | } |
652 | result = usb_submit_urb(urb, GFP_ATOMIC); | 646 | result = usb_submit_urb(urb, GFP_ATOMIC); |
653 | d_printf(3, dev, "NEEP: submit %d\n", result); | ||
654 | if (result < 0) { | 647 | if (result < 0) { |
655 | dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", | 648 | dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", |
656 | result); | 649 | result); |
@@ -759,11 +752,11 @@ static int hwarc_get_version(struct uwb_rc *rc) | |||
759 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); | 752 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); |
760 | while (itr_size >= sizeof(*hdr)) { | 753 | while (itr_size >= sizeof(*hdr)) { |
761 | hdr = (struct usb_descriptor_header *) itr; | 754 | hdr = (struct usb_descriptor_header *) itr; |
762 | d_printf(3, dev, "Extra device descriptor: " | 755 | dev_dbg(dev, "Extra device descriptor: " |
763 | "type %02x/%u bytes @ %zu (%zu left)\n", | 756 | "type %02x/%u bytes @ %zu (%zu left)\n", |
764 | hdr->bDescriptorType, hdr->bLength, | 757 | hdr->bDescriptorType, hdr->bLength, |
765 | (itr - usb_dev->rawdescriptors[actconfig_idx]), | 758 | (itr - usb_dev->rawdescriptors[actconfig_idx]), |
766 | itr_size); | 759 | itr_size); |
767 | if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) | 760 | if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) |
768 | goto found; | 761 | goto found; |
769 | itr += hdr->bLength; | 762 | itr += hdr->bLength; |
@@ -795,8 +788,7 @@ found: | |||
795 | goto error; | 788 | goto error; |
796 | } | 789 | } |
797 | rc->version = version; | 790 | rc->version = version; |
798 | d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n", | 791 | dev_dbg(dev, "Device supports WUSB protocol version 0x%04x \n", rc->version); |
799 | rc->version); | ||
800 | result = 0; | 792 | result = 0; |
801 | error: | 793 | error: |
802 | return result; | 794 | return result; |
@@ -877,11 +869,28 @@ static void hwarc_disconnect(struct usb_interface *iface) | |||
877 | uwb_rc_rm(uwb_rc); | 869 | uwb_rc_rm(uwb_rc); |
878 | usb_put_intf(hwarc->usb_iface); | 870 | usb_put_intf(hwarc->usb_iface); |
879 | usb_put_dev(hwarc->usb_dev); | 871 | usb_put_dev(hwarc->usb_dev); |
880 | d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc); | ||
881 | kfree(hwarc); | 872 | kfree(hwarc); |
882 | uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ | 873 | uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ |
883 | } | 874 | } |
884 | 875 | ||
876 | static int hwarc_pre_reset(struct usb_interface *iface) | ||
877 | { | ||
878 | struct hwarc *hwarc = usb_get_intfdata(iface); | ||
879 | struct uwb_rc *uwb_rc = hwarc->uwb_rc; | ||
880 | |||
881 | uwb_rc_pre_reset(uwb_rc); | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | static int hwarc_post_reset(struct usb_interface *iface) | ||
886 | { | ||
887 | struct hwarc *hwarc = usb_get_intfdata(iface); | ||
888 | struct uwb_rc *uwb_rc = hwarc->uwb_rc; | ||
889 | |||
890 | uwb_rc_post_reset(uwb_rc); | ||
891 | return 0; | ||
892 | } | ||
893 | |||
885 | /** USB device ID's that we handle */ | 894 | /** USB device ID's that we handle */ |
886 | static struct usb_device_id hwarc_id_table[] = { | 895 | static struct usb_device_id hwarc_id_table[] = { |
887 | /* D-Link DUB-1210 */ | 896 | /* D-Link DUB-1210 */ |
@@ -898,20 +907,16 @@ MODULE_DEVICE_TABLE(usb, hwarc_id_table); | |||
898 | 907 | ||
899 | static struct usb_driver hwarc_driver = { | 908 | static struct usb_driver hwarc_driver = { |
900 | .name = "hwa-rc", | 909 | .name = "hwa-rc", |
910 | .id_table = hwarc_id_table, | ||
901 | .probe = hwarc_probe, | 911 | .probe = hwarc_probe, |
902 | .disconnect = hwarc_disconnect, | 912 | .disconnect = hwarc_disconnect, |
903 | .id_table = hwarc_id_table, | 913 | .pre_reset = hwarc_pre_reset, |
914 | .post_reset = hwarc_post_reset, | ||
904 | }; | 915 | }; |
905 | 916 | ||
906 | static int __init hwarc_driver_init(void) | 917 | static int __init hwarc_driver_init(void) |
907 | { | 918 | { |
908 | int result; | 919 | return usb_register(&hwarc_driver); |
909 | result = usb_register(&hwarc_driver); | ||
910 | if (result < 0) | ||
911 | printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n", | ||
912 | result); | ||
913 | return result; | ||
914 | |||
915 | } | 920 | } |
916 | module_init(hwarc_driver_init); | 921 | module_init(hwarc_driver_init); |
917 | 922 | ||
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c index 9097b3b30385..da7b1d08003c 100644 --- a/drivers/uwb/i1480/dfu/dfu.c +++ b/drivers/uwb/i1480/dfu/dfu.c | |||
@@ -34,10 +34,7 @@ | |||
34 | #include <linux/uwb.h> | 34 | #include <linux/uwb.h> |
35 | #include <linux/random.h> | 35 | #include <linux/random.h> |
36 | 36 | ||
37 | #define D_LOCAL 0 | 37 | /* |
38 | #include <linux/uwb/debug.h> | ||
39 | |||
40 | /** | ||
41 | * i1480_rceb_check - Check RCEB for expected field values | 38 | * i1480_rceb_check - Check RCEB for expected field values |
42 | * @i1480: pointer to device for which RCEB is being checked | 39 | * @i1480: pointer to device for which RCEB is being checked |
43 | * @rceb: RCEB being checked | 40 | * @rceb: RCEB being checked |
@@ -83,7 +80,7 @@ int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, | |||
83 | EXPORT_SYMBOL_GPL(i1480_rceb_check); | 80 | EXPORT_SYMBOL_GPL(i1480_rceb_check); |
84 | 81 | ||
85 | 82 | ||
86 | /** | 83 | /* |
87 | * Execute a Radio Control Command | 84 | * Execute a Radio Control Command |
88 | * | 85 | * |
89 | * Command data has to be in i1480->cmd_buf. | 86 | * Command data has to be in i1480->cmd_buf. |
@@ -101,7 +98,6 @@ ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, | |||
101 | u8 expected_type = reply->bEventType; | 98 | u8 expected_type = reply->bEventType; |
102 | u8 context; | 99 | u8 context; |
103 | 100 | ||
104 | d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); | ||
105 | init_completion(&i1480->evt_complete); | 101 | init_completion(&i1480->evt_complete); |
106 | i1480->evt_result = -EINPROGRESS; | 102 | i1480->evt_result = -EINPROGRESS; |
107 | do { | 103 | do { |
@@ -150,8 +146,6 @@ ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, | |||
150 | result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, | 146 | result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, |
151 | expected_type, expected_event); | 147 | expected_type, expected_event); |
152 | error: | 148 | error: |
153 | d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n", | ||
154 | i1480, cmd_name, cmd_size, result); | ||
155 | return result; | 149 | return result; |
156 | } | 150 | } |
157 | EXPORT_SYMBOL_GPL(i1480_cmd); | 151 | EXPORT_SYMBOL_GPL(i1480_cmd); |
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c index 2e4d8f07c165..694d0daf88ab 100644 --- a/drivers/uwb/i1480/dfu/mac.c +++ b/drivers/uwb/i1480/dfu/mac.c | |||
@@ -31,9 +31,6 @@ | |||
31 | #include <linux/uwb.h> | 31 | #include <linux/uwb.h> |
32 | #include "i1480-dfu.h" | 32 | #include "i1480-dfu.h" |
33 | 33 | ||
34 | #define D_LOCAL 0 | ||
35 | #include <linux/uwb/debug.h> | ||
36 | |||
37 | /* | 34 | /* |
38 | * Descriptor for a continuous segment of MAC fw data | 35 | * Descriptor for a continuous segment of MAC fw data |
39 | */ | 36 | */ |
@@ -184,10 +181,6 @@ ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr) | |||
184 | } | 181 | } |
185 | if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { | 182 | if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { |
186 | u8 *buf = i1480->cmd_buf; | 183 | u8 *buf = i1480->cmd_buf; |
187 | d_printf(2, i1480->dev, | ||
188 | "original data @ %p + %u, %zu bytes\n", | ||
189 | bin, src_itr, result); | ||
190 | d_dump(4, i1480->dev, bin + src_itr, result); | ||
191 | for (cnt = 0; cnt < result; cnt++) | 184 | for (cnt = 0; cnt < result; cnt++) |
192 | if (bin[src_itr + cnt] != buf[cnt]) { | 185 | if (bin[src_itr + cnt] != buf[cnt]) { |
193 | dev_err(i1480->dev, "byte failed at " | 186 | dev_err(i1480->dev, "byte failed at " |
@@ -224,7 +217,6 @@ int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr, | |||
224 | struct fw_hdr *hdr_itr; | 217 | struct fw_hdr *hdr_itr; |
225 | int verif_retry_count; | 218 | int verif_retry_count; |
226 | 219 | ||
227 | d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr); | ||
228 | /* Now, header by header, push them to the hw */ | 220 | /* Now, header by header, push them to the hw */ |
229 | for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { | 221 | for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { |
230 | verif_retry_count = 0; | 222 | verif_retry_count = 0; |
@@ -264,7 +256,6 @@ retry: | |||
264 | break; | 256 | break; |
265 | } | 257 | } |
266 | } | 258 | } |
267 | d_fnend(3, dev, "(%zd)\n", result); | ||
268 | return result; | 259 | return result; |
269 | } | 260 | } |
270 | 261 | ||
@@ -337,11 +328,9 @@ int __mac_fw_upload(struct i1480 *i1480, const char *fw_name, | |||
337 | const struct firmware *fw; | 328 | const struct firmware *fw; |
338 | struct fw_hdr *fw_hdrs; | 329 | struct fw_hdr *fw_hdrs; |
339 | 330 | ||
340 | d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag); | ||
341 | result = request_firmware(&fw, fw_name, i1480->dev); | 331 | result = request_firmware(&fw, fw_name, i1480->dev); |
342 | if (result < 0) /* Up to caller to complain on -ENOENT */ | 332 | if (result < 0) /* Up to caller to complain on -ENOENT */ |
343 | goto out; | 333 | goto out; |
344 | d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name); | ||
345 | result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); | 334 | result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); |
346 | if (result < 0) { | 335 | if (result < 0) { |
347 | dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " | 336 | dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " |
@@ -363,8 +352,6 @@ out_hdrs_release: | |||
363 | out_release: | 352 | out_release: |
364 | release_firmware(fw); | 353 | release_firmware(fw); |
365 | out: | 354 | out: |
366 | d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag, | ||
367 | result); | ||
368 | return result; | 355 | return result; |
369 | } | 356 | } |
370 | 357 | ||
@@ -433,7 +420,6 @@ int i1480_fw_is_running_q(struct i1480 *i1480) | |||
433 | int result; | 420 | int result; |
434 | u32 *val = (u32 *) i1480->cmd_buf; | 421 | u32 *val = (u32 *) i1480->cmd_buf; |
435 | 422 | ||
436 | d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480); | ||
437 | for (cnt = 0; cnt < 10; cnt++) { | 423 | for (cnt = 0; cnt < 10; cnt++) { |
438 | msleep(100); | 424 | msleep(100); |
439 | result = i1480->read(i1480, 0x80080000, 4); | 425 | result = i1480->read(i1480, 0x80080000, 4); |
@@ -447,7 +433,6 @@ int i1480_fw_is_running_q(struct i1480 *i1480) | |||
447 | dev_err(i1480->dev, "Timed out waiting for fw to start\n"); | 433 | dev_err(i1480->dev, "Timed out waiting for fw to start\n"); |
448 | result = -ETIMEDOUT; | 434 | result = -ETIMEDOUT; |
449 | out: | 435 | out: |
450 | d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); | ||
451 | return result; | 436 | return result; |
452 | 437 | ||
453 | } | 438 | } |
@@ -467,7 +452,6 @@ int i1480_mac_fw_upload(struct i1480 *i1480) | |||
467 | int result = 0, deprecated_name = 0; | 452 | int result = 0, deprecated_name = 0; |
468 | struct i1480_rceb *rcebe = (void *) i1480->evt_buf; | 453 | struct i1480_rceb *rcebe = (void *) i1480->evt_buf; |
469 | 454 | ||
470 | d_fnstart(3, i1480->dev, "(%p)\n", i1480); | ||
471 | result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); | 455 | result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); |
472 | if (result == -ENOENT) { | 456 | if (result == -ENOENT) { |
473 | result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, | 457 | result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, |
@@ -501,7 +485,6 @@ int i1480_mac_fw_upload(struct i1480 *i1480) | |||
501 | dev_err(i1480->dev, "MAC fw '%s': initialization event returns " | 485 | dev_err(i1480->dev, "MAC fw '%s': initialization event returns " |
502 | "wrong size (%zu bytes vs %zu needed)\n", | 486 | "wrong size (%zu bytes vs %zu needed)\n", |
503 | i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); | 487 | i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); |
504 | dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32)); | ||
505 | goto error_size; | 488 | goto error_size; |
506 | } | 489 | } |
507 | result = -EIO; | 490 | result = -EIO; |
@@ -522,6 +505,5 @@ error_fw_not_running: | |||
522 | error_init_timeout: | 505 | error_init_timeout: |
523 | error_size: | 506 | error_size: |
524 | error_setup: | 507 | error_setup: |
525 | d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); | ||
526 | return result; | 508 | return result; |
527 | } | 509 | } |
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 98eeeff051aa..686795e97195 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c | |||
@@ -35,7 +35,6 @@ | |||
35 | * the functions are i1480_usb_NAME(). | 35 | * the functions are i1480_usb_NAME(). |
36 | */ | 36 | */ |
37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
38 | #include <linux/version.h> | ||
39 | #include <linux/usb.h> | 38 | #include <linux/usb.h> |
40 | #include <linux/interrupt.h> | 39 | #include <linux/interrupt.h> |
41 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
@@ -44,10 +43,6 @@ | |||
44 | #include <linux/usb/wusb-wa.h> | 43 | #include <linux/usb/wusb-wa.h> |
45 | #include "i1480-dfu.h" | 44 | #include "i1480-dfu.h" |
46 | 45 | ||
47 | #define D_LOCAL 0 | ||
48 | #include <linux/uwb/debug.h> | ||
49 | |||
50 | |||
51 | struct i1480_usb { | 46 | struct i1480_usb { |
52 | struct i1480 i1480; | 47 | struct i1480 i1480; |
53 | struct usb_device *usb_dev; | 48 | struct usb_device *usb_dev; |
@@ -118,8 +113,6 @@ int i1480_usb_write(struct i1480 *i1480, u32 memory_address, | |||
118 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | 113 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); |
119 | size_t buffer_size, itr = 0; | 114 | size_t buffer_size, itr = 0; |
120 | 115 | ||
121 | d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n", | ||
122 | i1480, memory_address, buffer, size); | ||
123 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ | 116 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ |
124 | while (size > 0) { | 117 | while (size > 0) { |
125 | buffer_size = size < i1480->buf_size ? size : i1480->buf_size; | 118 | buffer_size = size < i1480->buf_size ? size : i1480->buf_size; |
@@ -132,16 +125,10 @@ int i1480_usb_write(struct i1480 *i1480, u32 memory_address, | |||
132 | i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); | 125 | i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); |
133 | if (result < 0) | 126 | if (result < 0) |
134 | break; | 127 | break; |
135 | d_printf(3, i1480->dev, | ||
136 | "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n", | ||
137 | memory_address, result, buffer_size); | ||
138 | d_dump(4, i1480->dev, i1480->cmd_buf, result); | ||
139 | itr += result; | 128 | itr += result; |
140 | memory_address += result; | 129 | memory_address += result; |
141 | size -= result; | 130 | size -= result; |
142 | } | 131 | } |
143 | d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n", | ||
144 | i1480, memory_address, buffer, size, result); | ||
145 | return result; | 132 | return result; |
146 | } | 133 | } |
147 | 134 | ||
@@ -166,8 +153,6 @@ int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) | |||
166 | size_t itr, read_size = i1480->buf_size; | 153 | size_t itr, read_size = i1480->buf_size; |
167 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | 154 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); |
168 | 155 | ||
169 | d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n", | ||
170 | i1480, addr, size); | ||
171 | BUG_ON(size > i1480->buf_size); | 156 | BUG_ON(size > i1480->buf_size); |
172 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ | 157 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ |
173 | BUG_ON(read_size > 512); | 158 | BUG_ON(read_size > 512); |
@@ -201,10 +186,6 @@ int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) | |||
201 | } | 186 | } |
202 | result = bytes; | 187 | result = bytes; |
203 | out: | 188 | out: |
204 | d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n", | ||
205 | i1480, addr, size, result); | ||
206 | if (result > 0) | ||
207 | d_dump(4, i1480->dev, i1480->cmd_buf, result); | ||
208 | return result; | 189 | return result; |
209 | } | 190 | } |
210 | 191 | ||
@@ -260,7 +241,6 @@ int i1480_usb_wait_init_done(struct i1480 *i1480) | |||
260 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | 241 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); |
261 | struct usb_endpoint_descriptor *epd; | 242 | struct usb_endpoint_descriptor *epd; |
262 | 243 | ||
263 | d_fnstart(3, dev, "(%p)\n", i1480); | ||
264 | init_completion(&i1480->evt_complete); | 244 | init_completion(&i1480->evt_complete); |
265 | i1480->evt_result = -EINPROGRESS; | 245 | i1480->evt_result = -EINPROGRESS; |
266 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; | 246 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; |
@@ -282,14 +262,12 @@ int i1480_usb_wait_init_done(struct i1480 *i1480) | |||
282 | goto error_wait; | 262 | goto error_wait; |
283 | } | 263 | } |
284 | usb_kill_urb(i1480_usb->neep_urb); | 264 | usb_kill_urb(i1480_usb->neep_urb); |
285 | d_fnend(3, dev, "(%p) = 0\n", i1480); | ||
286 | return 0; | 265 | return 0; |
287 | 266 | ||
288 | error_wait: | 267 | error_wait: |
289 | usb_kill_urb(i1480_usb->neep_urb); | 268 | usb_kill_urb(i1480_usb->neep_urb); |
290 | error_submit: | 269 | error_submit: |
291 | i1480->evt_result = result; | 270 | i1480->evt_result = result; |
292 | d_fnend(3, dev, "(%p) = %d\n", i1480, result); | ||
293 | return result; | 271 | return result; |
294 | } | 272 | } |
295 | 273 | ||
@@ -320,7 +298,6 @@ int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) | |||
320 | struct uwb_rccb *cmd = i1480->cmd_buf; | 298 | struct uwb_rccb *cmd = i1480->cmd_buf; |
321 | u8 iface_no; | 299 | u8 iface_no; |
322 | 300 | ||
323 | d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); | ||
324 | /* Post a read on the notification & event endpoint */ | 301 | /* Post a read on the notification & event endpoint */ |
325 | iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; | 302 | iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; |
326 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; | 303 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; |
@@ -348,15 +325,11 @@ int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) | |||
348 | cmd_name, result); | 325 | cmd_name, result); |
349 | goto error_submit_ep0; | 326 | goto error_submit_ep0; |
350 | } | 327 | } |
351 | d_fnend(3, dev, "(%p, %s, %zu) = %d\n", | ||
352 | i1480, cmd_name, cmd_size, result); | ||
353 | return result; | 328 | return result; |
354 | 329 | ||
355 | error_submit_ep0: | 330 | error_submit_ep0: |
356 | usb_kill_urb(i1480_usb->neep_urb); | 331 | usb_kill_urb(i1480_usb->neep_urb); |
357 | error_submit_ep1: | 332 | error_submit_ep1: |
358 | d_fnend(3, dev, "(%p, %s, %zu) = %d\n", | ||
359 | i1480, cmd_name, cmd_size, result); | ||
360 | return result; | 333 | return result; |
361 | } | 334 | } |
362 | 335 | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c index 737d60cd5b73..049c05d4cc6a 100644 --- a/drivers/uwb/i1480/i1480u-wlp/lc.c +++ b/drivers/uwb/i1480/i1480u-wlp/lc.c | |||
@@ -55,10 +55,9 @@ | |||
55 | * is being removed. | 55 | * is being removed. |
56 | * i1480u_rm() | 56 | * i1480u_rm() |
57 | */ | 57 | */ |
58 | #include <linux/version.h> | ||
59 | #include <linux/if_arp.h> | 58 | #include <linux/if_arp.h> |
60 | #include <linux/etherdevice.h> | 59 | #include <linux/etherdevice.h> |
61 | #include <linux/uwb/debug.h> | 60 | |
62 | #include "i1480u-wlp.h" | 61 | #include "i1480u-wlp.h" |
63 | 62 | ||
64 | 63 | ||
@@ -207,7 +206,7 @@ int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface) | |||
207 | wlp->fill_device_info = i1480u_fill_device_info; | 206 | wlp->fill_device_info = i1480u_fill_device_info; |
208 | wlp->stop_queue = i1480u_stop_queue; | 207 | wlp->stop_queue = i1480u_stop_queue; |
209 | wlp->start_queue = i1480u_start_queue; | 208 | wlp->start_queue = i1480u_start_queue; |
210 | result = wlp_setup(wlp, rc); | 209 | result = wlp_setup(wlp, rc, net_dev); |
211 | if (result < 0) { | 210 | if (result < 0) { |
212 | dev_err(&iface->dev, "Cannot setup WLP\n"); | 211 | dev_err(&iface->dev, "Cannot setup WLP\n"); |
213 | goto error_wlp_setup; | 212 | goto error_wlp_setup; |
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c index 8802ac43d872..e3873ffb942c 100644 --- a/drivers/uwb/i1480/i1480u-wlp/netdev.c +++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c | |||
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | #include <linux/if_arp.h> | 42 | #include <linux/if_arp.h> |
43 | #include <linux/etherdevice.h> | 43 | #include <linux/etherdevice.h> |
44 | #include <linux/uwb/debug.h> | 44 | |
45 | #include "i1480u-wlp.h" | 45 | #include "i1480u-wlp.h" |
46 | 46 | ||
47 | struct i1480u_cmd_set_ip_mas { | 47 | struct i1480u_cmd_set_ip_mas { |
@@ -207,6 +207,11 @@ int i1480u_open(struct net_device *net_dev) | |||
207 | result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ | 207 | result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ |
208 | if (result < 0) | 208 | if (result < 0) |
209 | goto error_rx_setup; | 209 | goto error_rx_setup; |
210 | |||
211 | result = uwb_radio_start(&wlp->pal); | ||
212 | if (result < 0) | ||
213 | goto error_radio_start; | ||
214 | |||
210 | netif_wake_queue(net_dev); | 215 | netif_wake_queue(net_dev); |
211 | #ifdef i1480u_FLOW_CONTROL | 216 | #ifdef i1480u_FLOW_CONTROL |
212 | result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; | 217 | result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; |
@@ -215,25 +220,20 @@ int i1480u_open(struct net_device *net_dev) | |||
215 | goto error_notif_urb_submit; | 220 | goto error_notif_urb_submit; |
216 | } | 221 | } |
217 | #endif | 222 | #endif |
218 | i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb; | ||
219 | i1480u->uwb_notifs_handler.data = i1480u; | ||
220 | if (uwb_bg_joined(rc)) | ||
221 | netif_carrier_on(net_dev); | ||
222 | else | ||
223 | netif_carrier_off(net_dev); | ||
224 | uwb_notifs_register(rc, &i1480u->uwb_notifs_handler); | ||
225 | /* Interface is up with an address, now we can create WSS */ | 223 | /* Interface is up with an address, now we can create WSS */ |
226 | result = wlp_wss_setup(net_dev, &wlp->wss); | 224 | result = wlp_wss_setup(net_dev, &wlp->wss); |
227 | if (result < 0) { | 225 | if (result < 0) { |
228 | dev_err(dev, "Can't create WSS: %d. \n", result); | 226 | dev_err(dev, "Can't create WSS: %d. \n", result); |
229 | goto error_notif_deregister; | 227 | goto error_wss_setup; |
230 | } | 228 | } |
231 | return 0; | 229 | return 0; |
232 | error_notif_deregister: | 230 | error_wss_setup: |
233 | uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); | ||
234 | #ifdef i1480u_FLOW_CONTROL | 231 | #ifdef i1480u_FLOW_CONTROL |
232 | usb_kill_urb(i1480u->notif_urb); | ||
235 | error_notif_urb_submit: | 233 | error_notif_urb_submit: |
236 | #endif | 234 | #endif |
235 | uwb_radio_stop(&wlp->pal); | ||
236 | error_radio_start: | ||
237 | netif_stop_queue(net_dev); | 237 | netif_stop_queue(net_dev); |
238 | i1480u_rx_release(i1480u); | 238 | i1480u_rx_release(i1480u); |
239 | error_rx_setup: | 239 | error_rx_setup: |
@@ -248,16 +248,15 @@ int i1480u_stop(struct net_device *net_dev) | |||
248 | { | 248 | { |
249 | struct i1480u *i1480u = netdev_priv(net_dev); | 249 | struct i1480u *i1480u = netdev_priv(net_dev); |
250 | struct wlp *wlp = &i1480u->wlp; | 250 | struct wlp *wlp = &i1480u->wlp; |
251 | struct uwb_rc *rc = wlp->rc; | ||
252 | 251 | ||
253 | BUG_ON(wlp->rc == NULL); | 252 | BUG_ON(wlp->rc == NULL); |
254 | wlp_wss_remove(&wlp->wss); | 253 | wlp_wss_remove(&wlp->wss); |
255 | uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); | ||
256 | netif_carrier_off(net_dev); | 254 | netif_carrier_off(net_dev); |
257 | #ifdef i1480u_FLOW_CONTROL | 255 | #ifdef i1480u_FLOW_CONTROL |
258 | usb_kill_urb(i1480u->notif_urb); | 256 | usb_kill_urb(i1480u->notif_urb); |
259 | #endif | 257 | #endif |
260 | netif_stop_queue(net_dev); | 258 | netif_stop_queue(net_dev); |
259 | uwb_radio_stop(&wlp->pal); | ||
261 | i1480u_rx_release(i1480u); | 260 | i1480u_rx_release(i1480u); |
262 | i1480u_tx_release(i1480u); | 261 | i1480u_tx_release(i1480u); |
263 | return 0; | 262 | return 0; |
@@ -303,34 +302,6 @@ int i1480u_change_mtu(struct net_device *net_dev, int mtu) | |||
303 | return 0; | 302 | return 0; |
304 | } | 303 | } |
305 | 304 | ||
306 | |||
307 | /** | ||
308 | * Callback function to handle events from UWB | ||
309 | * When we see other devices we know the carrier is ok, | ||
310 | * if we are the only device in the beacon group we set the carrier | ||
311 | * state to off. | ||
312 | * */ | ||
313 | void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev, | ||
314 | enum uwb_notifs event) | ||
315 | { | ||
316 | struct i1480u *i1480u = data; | ||
317 | struct net_device *net_dev = i1480u->net_dev; | ||
318 | struct device *dev = &i1480u->usb_iface->dev; | ||
319 | switch (event) { | ||
320 | case UWB_NOTIF_BG_JOIN: | ||
321 | netif_carrier_on(net_dev); | ||
322 | dev_info(dev, "Link is up\n"); | ||
323 | break; | ||
324 | case UWB_NOTIF_BG_LEAVE: | ||
325 | netif_carrier_off(net_dev); | ||
326 | dev_info(dev, "Link is down\n"); | ||
327 | break; | ||
328 | default: | ||
329 | dev_err(dev, "don't know how to handle event %d from uwb\n", | ||
330 | event); | ||
331 | } | ||
332 | } | ||
333 | |||
334 | /** | 305 | /** |
335 | * Stop the network queue | 306 | * Stop the network queue |
336 | * | 307 | * |
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c index 9fc035354a76..34f4cf9a7d34 100644 --- a/drivers/uwb/i1480/i1480u-wlp/rx.c +++ b/drivers/uwb/i1480/i1480u-wlp/rx.c | |||
@@ -68,11 +68,7 @@ | |||
68 | #include <linux/etherdevice.h> | 68 | #include <linux/etherdevice.h> |
69 | #include "i1480u-wlp.h" | 69 | #include "i1480u-wlp.h" |
70 | 70 | ||
71 | #define D_LOCAL 0 | 71 | /* |
72 | #include <linux/uwb/debug.h> | ||
73 | |||
74 | |||
75 | /** | ||
76 | * Setup the RX context | 72 | * Setup the RX context |
77 | * | 73 | * |
78 | * Each URB is provided with a transfer_buffer that is the data field | 74 | * Each URB is provided with a transfer_buffer that is the data field |
@@ -129,7 +125,7 @@ error: | |||
129 | } | 125 | } |
130 | 126 | ||
131 | 127 | ||
132 | /** Release resources associated to the rx context */ | 128 | /* Release resources associated to the rx context */ |
133 | void i1480u_rx_release(struct i1480u *i1480u) | 129 | void i1480u_rx_release(struct i1480u *i1480u) |
134 | { | 130 | { |
135 | int cnt; | 131 | int cnt; |
@@ -155,7 +151,7 @@ void i1480u_rx_unlink_urbs(struct i1480u *i1480u) | |||
155 | } | 151 | } |
156 | } | 152 | } |
157 | 153 | ||
158 | /** Fix an out-of-sequence packet */ | 154 | /* Fix an out-of-sequence packet */ |
159 | #define i1480u_fix(i1480u, msg...) \ | 155 | #define i1480u_fix(i1480u, msg...) \ |
160 | do { \ | 156 | do { \ |
161 | if (printk_ratelimit()) \ | 157 | if (printk_ratelimit()) \ |
@@ -166,7 +162,7 @@ do { \ | |||
166 | } while (0) | 162 | } while (0) |
167 | 163 | ||
168 | 164 | ||
169 | /** Drop an out-of-sequence packet */ | 165 | /* Drop an out-of-sequence packet */ |
170 | #define i1480u_drop(i1480u, msg...) \ | 166 | #define i1480u_drop(i1480u, msg...) \ |
171 | do { \ | 167 | do { \ |
172 | if (printk_ratelimit()) \ | 168 | if (printk_ratelimit()) \ |
@@ -177,7 +173,7 @@ do { \ | |||
177 | 173 | ||
178 | 174 | ||
179 | 175 | ||
180 | /** Finalizes setting up the SKB and delivers it | 176 | /* Finalizes setting up the SKB and delivers it |
181 | * | 177 | * |
182 | * We first pass the incoming frame to WLP substack for verification. It | 178 | * We first pass the incoming frame to WLP substack for verification. It |
183 | * may also be a WLP association frame in which case WLP will take over the | 179 | * may also be a WLP association frame in which case WLP will take over the |
@@ -192,18 +188,11 @@ void i1480u_skb_deliver(struct i1480u *i1480u) | |||
192 | struct net_device *net_dev = i1480u->net_dev; | 188 | struct net_device *net_dev = i1480u->net_dev; |
193 | struct device *dev = &i1480u->usb_iface->dev; | 189 | struct device *dev = &i1480u->usb_iface->dev; |
194 | 190 | ||
195 | d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n", | ||
196 | i1480u->rx_skb, i1480u->rx_skb->len); | ||
197 | d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len); | ||
198 | should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, | 191 | should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, |
199 | &i1480u->rx_srcaddr); | 192 | &i1480u->rx_srcaddr); |
200 | if (!should_parse) | 193 | if (!should_parse) |
201 | goto out; | 194 | goto out; |
202 | i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); | 195 | i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); |
203 | d_printf(5, dev, "RX delivered skb(%p), %u bytes\n", | ||
204 | i1480u->rx_skb, i1480u->rx_skb->len); | ||
205 | d_dump(7, dev, i1480u->rx_skb->data, | ||
206 | i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len); | ||
207 | i1480u->stats.rx_packets++; | 196 | i1480u->stats.rx_packets++; |
208 | i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; | 197 | i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; |
209 | net_dev->last_rx = jiffies; | 198 | net_dev->last_rx = jiffies; |
@@ -216,7 +205,7 @@ out: | |||
216 | } | 205 | } |
217 | 206 | ||
218 | 207 | ||
219 | /** | 208 | /* |
220 | * Process a buffer of data received from the USB RX endpoint | 209 | * Process a buffer of data received from the USB RX endpoint |
221 | * | 210 | * |
222 | * First fragment arrives with next or last fragment. All other fragments | 211 | * First fragment arrives with next or last fragment. All other fragments |
@@ -404,7 +393,7 @@ out: | |||
404 | } | 393 | } |
405 | 394 | ||
406 | 395 | ||
407 | /** | 396 | /* |
408 | * Called when an RX URB has finished receiving or has found some kind | 397 | * Called when an RX URB has finished receiving or has found some kind |
409 | * of error condition. | 398 | * of error condition. |
410 | * | 399 | * |
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c index a1d8ca6ac935..4ffaf546cc6c 100644 --- a/drivers/uwb/i1480/i1480u-wlp/sysfs.c +++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c | |||
@@ -25,8 +25,8 @@ | |||
25 | 25 | ||
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/etherdevice.h> | 27 | #include <linux/etherdevice.h> |
28 | #include <linux/uwb/debug.h> | ||
29 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | |||
30 | #include "i1480u-wlp.h" | 30 | #include "i1480u-wlp.h" |
31 | 31 | ||
32 | 32 | ||
@@ -226,7 +226,6 @@ ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight, | |||
226 | * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a | 226 | * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a |
227 | * class_device_attr_NAME or device_attr_NAME (for group registration). | 227 | * class_device_attr_NAME or device_attr_NAME (for group registration). |
228 | */ | 228 | */ |
229 | #include <linux/version.h> | ||
230 | 229 | ||
231 | #define i1480u_SHOW(name, fn, param) \ | 230 | #define i1480u_SHOW(name, fn, param) \ |
232 | static ssize_t i1480u_show_##name(struct device *dev, \ | 231 | static ssize_t i1480u_show_##name(struct device *dev, \ |
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c index 3426bfb68240..39032cc3503e 100644 --- a/drivers/uwb/i1480/i1480u-wlp/tx.c +++ b/drivers/uwb/i1480/i1480u-wlp/tx.c | |||
@@ -55,8 +55,6 @@ | |||
55 | */ | 55 | */ |
56 | 56 | ||
57 | #include "i1480u-wlp.h" | 57 | #include "i1480u-wlp.h" |
58 | #define D_LOCAL 5 | ||
59 | #include <linux/uwb/debug.h> | ||
60 | 58 | ||
61 | enum { | 59 | enum { |
62 | /* This is only for Next and Last TX packets */ | 60 | /* This is only for Next and Last TX packets */ |
@@ -64,7 +62,7 @@ enum { | |||
64 | - sizeof(struct untd_hdr_rst), | 62 | - sizeof(struct untd_hdr_rst), |
65 | }; | 63 | }; |
66 | 64 | ||
67 | /** Free resources allocated to a i1480u tx context. */ | 65 | /* Free resources allocated to a i1480u tx context. */ |
68 | static | 66 | static |
69 | void i1480u_tx_free(struct i1480u_tx *wtx) | 67 | void i1480u_tx_free(struct i1480u_tx *wtx) |
70 | { | 68 | { |
@@ -99,7 +97,7 @@ void i1480u_tx_unlink_urbs(struct i1480u *i1480u) | |||
99 | } | 97 | } |
100 | 98 | ||
101 | 99 | ||
102 | /** | 100 | /* |
103 | * Callback for a completed tx USB URB. | 101 | * Callback for a completed tx USB URB. |
104 | * | 102 | * |
105 | * TODO: | 103 | * TODO: |
@@ -149,8 +147,6 @@ void i1480u_tx_cb(struct urb *urb) | |||
149 | <= i1480u->tx_inflight.threshold | 147 | <= i1480u->tx_inflight.threshold |
150 | && netif_queue_stopped(net_dev) | 148 | && netif_queue_stopped(net_dev) |
151 | && i1480u->tx_inflight.threshold != 0) { | 149 | && i1480u->tx_inflight.threshold != 0) { |
152 | if (d_test(2) && printk_ratelimit()) | ||
153 | d_printf(2, dev, "Restart queue. \n"); | ||
154 | netif_start_queue(net_dev); | 150 | netif_start_queue(net_dev); |
155 | atomic_inc(&i1480u->tx_inflight.restart_count); | 151 | atomic_inc(&i1480u->tx_inflight.restart_count); |
156 | } | 152 | } |
@@ -158,7 +154,7 @@ void i1480u_tx_cb(struct urb *urb) | |||
158 | } | 154 | } |
159 | 155 | ||
160 | 156 | ||
161 | /** | 157 | /* |
162 | * Given a buffer that doesn't fit in a single fragment, create an | 158 | * Given a buffer that doesn't fit in a single fragment, create an |
163 | * scatter/gather structure for delivery to the USB pipe. | 159 | * scatter/gather structure for delivery to the USB pipe. |
164 | * | 160 | * |
@@ -253,15 +249,11 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
253 | /* Now do each remaining fragment */ | 249 | /* Now do each remaining fragment */ |
254 | result = -EINVAL; | 250 | result = -EINVAL; |
255 | while (pl_size_left > 0) { | 251 | while (pl_size_left > 0) { |
256 | d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n", | ||
257 | pl_size_left, buf_itr - wtx->buf); | ||
258 | if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf | 252 | if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf |
259 | > wtx->buf_size) { | 253 | > wtx->buf_size) { |
260 | printk(KERN_ERR "BUG: no space for header\n"); | 254 | printk(KERN_ERR "BUG: no space for header\n"); |
261 | goto error_bug; | 255 | goto error_bug; |
262 | } | 256 | } |
263 | d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n", | ||
264 | pl_size_left, buf_itr - wtx->buf); | ||
265 | untd_hdr_rst = buf_itr; | 257 | untd_hdr_rst = buf_itr; |
266 | buf_itr += sizeof(*untd_hdr_rst); | 258 | buf_itr += sizeof(*untd_hdr_rst); |
267 | if (pl_size_left > i1480u_MAX_PL_SIZE) { | 259 | if (pl_size_left > i1480u_MAX_PL_SIZE) { |
@@ -271,9 +263,6 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
271 | frg_pl_size = pl_size_left; | 263 | frg_pl_size = pl_size_left; |
272 | untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); | 264 | untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); |
273 | } | 265 | } |
274 | d_printf(5, NULL, | ||
275 | "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", | ||
276 | pl_size_left, buf_itr - wtx->buf, frg_pl_size); | ||
277 | untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); | 266 | untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); |
278 | untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); | 267 | untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); |
279 | untd_hdr_rst->padding = 0; | 268 | untd_hdr_rst->padding = 0; |
@@ -286,9 +275,6 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
286 | buf_itr += frg_pl_size; | 275 | buf_itr += frg_pl_size; |
287 | pl_itr += frg_pl_size; | 276 | pl_itr += frg_pl_size; |
288 | pl_size_left -= frg_pl_size; | 277 | pl_size_left -= frg_pl_size; |
289 | d_printf(5, NULL, | ||
290 | "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", | ||
291 | pl_size_left, buf_itr - wtx->buf, frg_pl_size); | ||
292 | } | 278 | } |
293 | dev_kfree_skb_irq(skb); | 279 | dev_kfree_skb_irq(skb); |
294 | return 0; | 280 | return 0; |
@@ -308,7 +294,7 @@ error_buf_alloc: | |||
308 | } | 294 | } |
309 | 295 | ||
310 | 296 | ||
311 | /** | 297 | /* |
312 | * Given a buffer that fits in a single fragment, fill out a @wtx | 298 | * Given a buffer that fits in a single fragment, fill out a @wtx |
313 | * struct for transmitting it down the USB pipe. | 299 | * struct for transmitting it down the USB pipe. |
314 | * | 300 | * |
@@ -346,7 +332,7 @@ int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
346 | } | 332 | } |
347 | 333 | ||
348 | 334 | ||
349 | /** | 335 | /* |
350 | * Given a skb to transmit, massage it to become palatable for the TX pipe | 336 | * Given a skb to transmit, massage it to become palatable for the TX pipe |
351 | * | 337 | * |
352 | * This will break the buffer in chunks smaller than | 338 | * This will break the buffer in chunks smaller than |
@@ -425,7 +411,7 @@ error_wtx_alloc: | |||
425 | return NULL; | 411 | return NULL; |
426 | } | 412 | } |
427 | 413 | ||
428 | /** | 414 | /* |
429 | * Actual fragmentation and transmission of frame | 415 | * Actual fragmentation and transmission of frame |
430 | * | 416 | * |
431 | * @wlp: WLP substack data structure | 417 | * @wlp: WLP substack data structure |
@@ -447,20 +433,12 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
447 | struct i1480u_tx *wtx; | 433 | struct i1480u_tx *wtx; |
448 | struct wlp_tx_hdr *wlp_tx_hdr; | 434 | struct wlp_tx_hdr *wlp_tx_hdr; |
449 | static unsigned char dev_bcast[2] = { 0xff, 0xff }; | 435 | static unsigned char dev_bcast[2] = { 0xff, 0xff }; |
450 | #if 0 | ||
451 | int lockup = 50; | ||
452 | #endif | ||
453 | 436 | ||
454 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
455 | net_dev); | ||
456 | BUG_ON(i1480u->wlp.rc == NULL); | 437 | BUG_ON(i1480u->wlp.rc == NULL); |
457 | if ((net_dev->flags & IFF_UP) == 0) | 438 | if ((net_dev->flags & IFF_UP) == 0) |
458 | goto out; | 439 | goto out; |
459 | result = -EBUSY; | 440 | result = -EBUSY; |
460 | if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { | 441 | if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { |
461 | if (d_test(2) && printk_ratelimit()) | ||
462 | d_printf(2, dev, "Max frames in flight " | ||
463 | "stopping queue.\n"); | ||
464 | netif_stop_queue(net_dev); | 442 | netif_stop_queue(net_dev); |
465 | goto error_max_inflight; | 443 | goto error_max_inflight; |
466 | } | 444 | } |
@@ -489,21 +467,6 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
489 | wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); | 467 | wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); |
490 | } | 468 | } |
491 | 469 | ||
492 | #if 0 | ||
493 | dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len); | ||
494 | dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
495 | #endif | ||
496 | #if 0 | ||
497 | /* simulates a device lockup after every lockup# packets */ | ||
498 | if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) { | ||
499 | /* Simulate a dropped transmit interrupt */ | ||
500 | net_dev->trans_start = jiffies; | ||
501 | netif_stop_queue(net_dev); | ||
502 | dev_err(dev, "Simulate lockup at %ld\n", jiffies); | ||
503 | return result; | ||
504 | } | ||
505 | #endif | ||
506 | |||
507 | result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ | 470 | result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ |
508 | if (result < 0) { | 471 | if (result < 0) { |
509 | dev_err(dev, "TX: cannot submit URB: %d\n", result); | 472 | dev_err(dev, "TX: cannot submit URB: %d\n", result); |
@@ -513,8 +476,6 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
513 | } | 476 | } |
514 | atomic_inc(&i1480u->tx_inflight.count); | 477 | atomic_inc(&i1480u->tx_inflight.count); |
515 | net_dev->trans_start = jiffies; | 478 | net_dev->trans_start = jiffies; |
516 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
517 | net_dev, result); | ||
518 | return result; | 479 | return result; |
519 | 480 | ||
520 | error_tx_urb_submit: | 481 | error_tx_urb_submit: |
@@ -522,13 +483,11 @@ error_tx_urb_submit: | |||
522 | error_wtx_alloc: | 483 | error_wtx_alloc: |
523 | error_max_inflight: | 484 | error_max_inflight: |
524 | out: | 485 | out: |
525 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
526 | net_dev, result); | ||
527 | return result; | 486 | return result; |
528 | } | 487 | } |
529 | 488 | ||
530 | 489 | ||
531 | /** | 490 | /* |
532 | * Transmit an skb Called when an skbuf has to be transmitted | 491 | * Transmit an skb Called when an skbuf has to be transmitted |
533 | * | 492 | * |
534 | * The skb is first passed to WLP substack to ensure this is a valid | 493 | * The skb is first passed to WLP substack to ensure this is a valid |
@@ -551,9 +510,6 @@ int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
551 | struct device *dev = &i1480u->usb_iface->dev; | 510 | struct device *dev = &i1480u->usb_iface->dev; |
552 | struct uwb_dev_addr dst; | 511 | struct uwb_dev_addr dst; |
553 | 512 | ||
554 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
555 | net_dev); | ||
556 | BUG_ON(i1480u->wlp.rc == NULL); | ||
557 | if ((net_dev->flags & IFF_UP) == 0) | 513 | if ((net_dev->flags & IFF_UP) == 0) |
558 | goto error; | 514 | goto error; |
559 | result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); | 515 | result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); |
@@ -562,31 +518,25 @@ int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
562 | "Dropping packet.\n", result); | 518 | "Dropping packet.\n", result); |
563 | goto error; | 519 | goto error; |
564 | } else if (result == 1) { | 520 | } else if (result == 1) { |
565 | d_printf(6, dev, "WLP will transmit frame. \n"); | ||
566 | /* trans_start time will be set when WLP actually transmits | 521 | /* trans_start time will be set when WLP actually transmits |
567 | * the frame */ | 522 | * the frame */ |
568 | goto out; | 523 | goto out; |
569 | } | 524 | } |
570 | d_printf(6, dev, "Transmitting frame. \n"); | ||
571 | result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); | 525 | result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); |
572 | if (result < 0) { | 526 | if (result < 0) { |
573 | dev_err(dev, "Frame TX failed (%d).\n", result); | 527 | dev_err(dev, "Frame TX failed (%d).\n", result); |
574 | goto error; | 528 | goto error; |
575 | } | 529 | } |
576 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
577 | net_dev, result); | ||
578 | return NETDEV_TX_OK; | 530 | return NETDEV_TX_OK; |
579 | error: | 531 | error: |
580 | dev_kfree_skb_any(skb); | 532 | dev_kfree_skb_any(skb); |
581 | i1480u->stats.tx_dropped++; | 533 | i1480u->stats.tx_dropped++; |
582 | out: | 534 | out: |
583 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
584 | net_dev, result); | ||
585 | return NETDEV_TX_OK; | 535 | return NETDEV_TX_OK; |
586 | } | 536 | } |
587 | 537 | ||
588 | 538 | ||
589 | /** | 539 | /* |
590 | * Called when a pkt transmission doesn't complete in a reasonable period | 540 | * Called when a pkt transmission doesn't complete in a reasonable period |
591 | * Device reset may sleep - do it outside of interrupt context (delayed) | 541 | * Device reset may sleep - do it outside of interrupt context (delayed) |
592 | */ | 542 | */ |
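The tx.c changes are likewise mostly debug removal; what remains is the in-flight accounting that stops the queue in the xmit path and restarts it from the URB completion callback. A hedged sketch of that flow-control pattern, with an illustrative struct standing in for the driver's tx_inflight fields:

#include <linux/netdevice.h>
#include <linux/atomic.h>

/* Illustrative stand-in for the driver's tx_inflight bookkeeping. */
struct example_tx_inflight {
        atomic_t count;
        int max;        /* stop the queue once this many URBs are in flight */
        int threshold;  /* restart it when we drop back to this level */
};

/* xmit path: refuse new frames while the pipe is full */
static bool example_tx_may_submit(struct example_tx_inflight *fc,
                                  struct net_device *net_dev)
{
        if (atomic_read(&fc->count) >= fc->max) {
                netif_stop_queue(net_dev);
                return false;
        }
        atomic_inc(&fc->count);
        return true;
}

/* URB completion callback: wake the queue up again */
static void example_tx_complete(struct example_tx_inflight *fc,
                                struct net_device *net_dev)
{
        if (atomic_dec_return(&fc->count) <= fc->threshold
            && fc->threshold != 0
            && netif_queue_stopped(net_dev))
                netif_start_queue(net_dev);
}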
diff --git a/drivers/uwb/ie-rcv.c b/drivers/uwb/ie-rcv.c new file mode 100644 index 000000000000..917e6d78a798 --- /dev/null +++ b/drivers/uwb/ie-rcv.c | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * IE Received notification handling. | ||
4 | * | ||
5 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/device.h> | ||
23 | #include <linux/bitmap.h> | ||
24 | #include "uwb-internal.h" | ||
25 | |||
26 | /* | ||
27 | * Process an incoming IE Received notification. | ||
28 | */ | ||
29 | int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt) | ||
30 | { | ||
31 | int result = -EINVAL; | ||
32 | struct device *dev = &evt->rc->uwb_dev.dev; | ||
33 | struct uwb_rc_evt_ie_rcv *iercv; | ||
34 | size_t iesize; | ||
35 | |||
36 | /* Is there enough data to decode it? */ | ||
37 | if (evt->notif.size < sizeof(*iercv)) { | ||
38 | dev_err(dev, "IE Received notification: Not enough data to " | ||
39 | "decode (%zu vs %zu bytes needed)\n", | ||
40 | evt->notif.size, sizeof(*iercv)); | ||
41 | goto error; | ||
42 | } | ||
43 | iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb); | ||
44 | iesize = le16_to_cpu(iercv->wIELength); | ||
45 | |||
46 | dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]); | ||
47 | |||
48 | if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) { | ||
49 | dev_warn(dev, "unhandled Relinquish Request IE\n"); | ||
50 | } | ||
51 | |||
52 | return 0; | ||
53 | error: | ||
54 | return result; | ||
55 | } | ||
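The new handler validates only the fixed part of the event before decoding it with container_of(). A stricter version would also confirm that the IE payload announced in wIELength actually arrived with the notification. A sketch of such an extra check; the helper is hypothetical and not part of the patch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/device.h>
#include "uwb-internal.h"

/* Hypothetical extra bounds check before touching iercv->IEData. */
static int example_ie_rcv_check(struct device *dev,
                                const struct uwb_rc_evt_ie_rcv *iercv,
                                size_t notif_size)
{
        size_t iesize = le16_to_cpu(iercv->wIELength);

        if (notif_size < sizeof(*iercv) + iesize) {
                dev_err(dev, "IE Received notification: truncated IE data "
                        "(%zu bytes available vs %zu announced)\n",
                        notif_size, sizeof(*iercv) + iesize);
                return -EINVAL;
        }
        return 0;
}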
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c index cf6f3d152b9d..ab976686175b 100644 --- a/drivers/uwb/ie.c +++ b/drivers/uwb/ie.c | |||
@@ -25,8 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include "uwb-internal.h" | 27 | #include "uwb-internal.h" |
28 | #define D_LOCAL 0 | ||
29 | #include <linux/uwb/debug.h> | ||
30 | 28 | ||
31 | /** | 29 | /** |
32 | * uwb_ie_next - get the next IE in a buffer | 30 | * uwb_ie_next - get the next IE in a buffer |
@@ -61,6 +59,42 @@ struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len) | |||
61 | EXPORT_SYMBOL_GPL(uwb_ie_next); | 59 | EXPORT_SYMBOL_GPL(uwb_ie_next); |
62 | 60 | ||
63 | /** | 61 | /** |
62 | * uwb_ie_dump_hex - print IEs to a character buffer | ||
63 | * @ies: the IEs to print. | ||
64 | * @len: length of all the IEs. | ||
65 | * @buf: the destination buffer. | ||
66 | * @size: size of @buf. | ||
67 | * | ||
68 | * Returns the number of characters written. | ||
69 | */ | ||
70 | int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, | ||
71 | char *buf, size_t size) | ||
72 | { | ||
73 | void *ptr; | ||
74 | const struct uwb_ie_hdr *ie; | ||
75 | int r = 0; | ||
76 | u8 *d; | ||
77 | |||
78 | ptr = (void *)ies; | ||
79 | for (;;) { | ||
80 | ie = uwb_ie_next(&ptr, &len); | ||
81 | if (!ie) | ||
82 | break; | ||
83 | |||
84 | r += scnprintf(buf + r, size - r, "%02x %02x", | ||
85 | (unsigned)ie->element_id, | ||
86 | (unsigned)ie->length); | ||
87 | d = (uint8_t *)ie + sizeof(struct uwb_ie_hdr); | ||
88 | while (d != ptr && r < size) | ||
89 | r += scnprintf(buf + r, size - r, " %02x", (unsigned)*d++); | ||
90 | if (r < size) | ||
91 | buf[r++] = '\n'; | ||
92 | }; | ||
93 | |||
94 | return r; | ||
95 | } | ||
96 | |||
97 | /** | ||
64 | * Get the IEs that a radio controller is sending in its beacon | 98 | * Get the IEs that a radio controller is sending in its beacon |
65 | * | 99 | * |
66 | * @uwb_rc: UWB Radio Controller | 100 | * @uwb_rc: UWB Radio Controller |
@@ -70,6 +104,7 @@ EXPORT_SYMBOL_GPL(uwb_ie_next); | |||
70 | * anything. Once done with the iedata buffer, call | 104 | * anything. Once done with the iedata buffer, call |
71 | * uwb_rc_ie_release(iedata). Don't call kfree on it. | 105 | * uwb_rc_ie_release(iedata). Don't call kfree on it. |
72 | */ | 106 | */ |
107 | static | ||
73 | ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) | 108 | ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) |
74 | { | 109 | { |
75 | ssize_t result; | 110 | ssize_t result; |
@@ -78,148 +113,35 @@ ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) | |||
78 | struct uwb_rceb *reply = NULL; | 113 | struct uwb_rceb *reply = NULL; |
79 | struct uwb_rc_evt_get_ie *get_ie; | 114 | struct uwb_rc_evt_get_ie *get_ie; |
80 | 115 | ||
81 | d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie); | ||
82 | result = -ENOMEM; | ||
83 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | 116 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); |
84 | if (cmd == NULL) | 117 | if (cmd == NULL) |
85 | goto error_kzalloc; | 118 | return -ENOMEM; |
119 | |||
86 | cmd->bCommandType = UWB_RC_CET_GENERAL; | 120 | cmd->bCommandType = UWB_RC_CET_GENERAL; |
87 | cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); | 121 | cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); |
88 | result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), | 122 | result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), |
89 | UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, | 123 | UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, |
90 | &reply); | 124 | &reply); |
125 | kfree(cmd); | ||
91 | if (result < 0) | 126 | if (result < 0) |
92 | goto error_cmd; | 127 | return result; |
128 | |||
93 | get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); | 129 | get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); |
94 | if (result < sizeof(*get_ie)) { | 130 | if (result < sizeof(*get_ie)) { |
95 | dev_err(dev, "not enough data returned for decoding GET IE " | 131 | dev_err(dev, "not enough data returned for decoding GET IE " |
96 | "(%zu bytes received vs %zu needed)\n", | 132 | "(%zu bytes received vs %zu needed)\n", |
97 | result, sizeof(*get_ie)); | 133 | result, sizeof(*get_ie)); |
98 | result = -EINVAL; | 134 | return -EINVAL; |
99 | } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { | 135 | } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { |
100 | dev_err(dev, "not enough data returned for decoding GET IE " | 136 | dev_err(dev, "not enough data returned for decoding GET IE " |
101 | "payload (%zu bytes received vs %zu needed)\n", result, | 137 | "payload (%zu bytes received vs %zu needed)\n", result, |
102 | sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); | 138 | sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); |
103 | result = -EINVAL; | ||
104 | } else | ||
105 | *pget_ie = get_ie; | ||
106 | error_cmd: | ||
107 | kfree(cmd); | ||
108 | error_kzalloc: | ||
109 | d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result); | ||
110 | return result; | ||
111 | } | ||
112 | EXPORT_SYMBOL_GPL(uwb_rc_get_ie); | ||
113 | |||
114 | |||
115 | /* | ||
116 | * Given a pointer to an IE, print it in ASCII/hex followed by a new line | ||
117 | * | ||
118 | * @ie_hdr: pointer to the IE header. Length is in there, and it is | ||
119 | * guaranteed that the ie_hdr->length bytes following it are | ||
120 | * safely accesible. | ||
121 | * | ||
122 | * @_data: context data passed from uwb_ie_for_each(), an struct output_ctx | ||
123 | */ | ||
124 | int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | ||
125 | size_t offset, void *_ctx) | ||
126 | { | ||
127 | struct uwb_buf_ctx *ctx = _ctx; | ||
128 | const u8 *pl = (void *)(ie_hdr + 1); | ||
129 | u8 pl_itr; | ||
130 | |||
131 | ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes, | ||
132 | "%02x %02x ", (unsigned) ie_hdr->element_id, | ||
133 | (unsigned) ie_hdr->length); | ||
134 | pl_itr = 0; | ||
135 | while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size) | ||
136 | ctx->bytes += scnprintf(ctx->buf + ctx->bytes, | ||
137 | ctx->size - ctx->bytes, | ||
138 | "%02x ", (unsigned) pl[pl_itr++]); | ||
139 | if (ctx->bytes < ctx->size) | ||
140 | ctx->buf[ctx->bytes++] = '\n'; | ||
141 | return 0; | ||
142 | } | ||
143 | EXPORT_SYMBOL_GPL(uwb_ie_dump_hex); | ||
144 | |||
145 | |||
146 | /** | ||
147 | * Verify that a pointer in a buffer points to valid IE | ||
148 | * | ||
149 | * @start: pointer to start of buffer in which IE appears | ||
150 | * @itr: pointer to IE inside buffer that will be verified | ||
151 | * @top: pointer to end of buffer | ||
152 | * | ||
153 | * @returns: 0 if IE is valid, <0 otherwise | ||
154 | * | ||
155 | * Verification involves checking that the buffer can contain a | ||
156 | * header and the amount of data reported in the IE header can be found in | ||
157 | * the buffer. | ||
158 | */ | ||
159 | static | ||
160 | int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start, | ||
161 | const void *itr, const void *top) | ||
162 | { | ||
163 | struct device *dev = &uwb_dev->dev; | ||
164 | const struct uwb_ie_hdr *ie_hdr; | ||
165 | |||
166 | if (top - itr < sizeof(*ie_hdr)) { | ||
167 | dev_err(dev, "Bad IE: no data to decode header " | ||
168 | "(%zu bytes left vs %zu needed) at offset %zu\n", | ||
169 | top - itr, sizeof(*ie_hdr), itr - start); | ||
170 | return -EINVAL; | ||
171 | } | ||
172 | ie_hdr = itr; | ||
173 | itr += sizeof(*ie_hdr); | ||
174 | if (top - itr < ie_hdr->length) { | ||
175 | dev_err(dev, "Bad IE: not enough data for payload " | ||
176 | "(%zu bytes left vs %zu needed) at offset %zu\n", | ||
177 | top - itr, (size_t)ie_hdr->length, | ||
178 | (void *)ie_hdr - start); | ||
179 | return -EINVAL; | 139 | return -EINVAL; |
180 | } | 140 | } |
181 | return 0; | ||
182 | } | ||
183 | 141 | ||
184 | 142 | *pget_ie = get_ie; | |
185 | /** | ||
186 | * Walk a buffer filled with consecutive IE's a buffer | ||
187 | * | ||
188 | * @uwb_dev: UWB device this IEs belong to (for err messages mainly) | ||
189 | * | ||
190 | * @fn: function to call with each IE; if it returns 0, we keep | ||
191 | * traversing the buffer. If it returns !0, we'll stop and return | ||
192 | * that value. | ||
193 | * | ||
194 | * @data: pointer passed to @fn | ||
195 | * | ||
196 | * @buf: buffer where the consecutive IEs are located | ||
197 | * | ||
198 | * @size: size of @buf | ||
199 | * | ||
200 | * Each IE is checked for basic correctness (there is space left for | ||
201 | * the header and the payload). If that test is failed, we stop | ||
202 | * processing. For every good IE, @fn is called. | ||
203 | */ | ||
204 | ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, | ||
205 | const void *buf, size_t size) | ||
206 | { | ||
207 | ssize_t result = 0; | ||
208 | const struct uwb_ie_hdr *ie_hdr; | ||
209 | const void *itr = buf, *top = itr + size; | ||
210 | |||
211 | while (itr < top) { | ||
212 | if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0) | ||
213 | break; | ||
214 | ie_hdr = itr; | ||
215 | itr += sizeof(*ie_hdr) + ie_hdr->length; | ||
216 | result = fn(uwb_dev, ie_hdr, itr - buf, data); | ||
217 | if (result != 0) | ||
218 | break; | ||
219 | } | ||
220 | return result; | 143 | return result; |
221 | } | 144 | } |
222 | EXPORT_SYMBOL_GPL(uwb_ie_for_each); | ||
223 | 145 | ||
224 | 146 | ||
225 | /** | 147 | /** |
@@ -256,70 +178,6 @@ error_cmd: | |||
256 | return result; | 178 | return result; |
257 | } | 179 | } |
258 | 180 | ||
259 | /** | ||
260 | * Determine by IE id if IE is host settable | ||
261 | * WUSB 1.0 [8.6.2.8 Table 8.85] | ||
262 | * | ||
263 | * EXCEPTION: | ||
264 | * All but UWB_IE_WLP appears in Table 8.85 from WUSB 1.0. Setting this IE | ||
265 | * is required for the WLP substack to perform association with its WSS so | ||
266 | * we hope that the WUSB spec will be changed to reflect this. | ||
267 | */ | ||
268 | static | ||
269 | int uwb_rc_ie_is_host_settable(enum uwb_ie element_id) | ||
270 | { | ||
271 | if (element_id == UWB_PCA_AVAILABILITY || | ||
272 | element_id == UWB_BP_SWITCH_IE || | ||
273 | element_id == UWB_MAC_CAPABILITIES_IE || | ||
274 | element_id == UWB_PHY_CAPABILITIES_IE || | ||
275 | element_id == UWB_APP_SPEC_PROBE_IE || | ||
276 | element_id == UWB_IDENTIFICATION_IE || | ||
277 | element_id == UWB_MASTER_KEY_ID_IE || | ||
278 | element_id == UWB_IE_WLP || | ||
279 | element_id == UWB_APP_SPEC_IE) | ||
280 | return 1; | ||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | |||
285 | /** | ||
286 | * Extract Host Settable IEs from IE | ||
287 | * | ||
288 | * @ie_data: pointer to buffer containing all IEs | ||
289 | * @size: size of buffer | ||
290 | * | ||
291 | * @returns: length of buffer that only includes host settable IEs | ||
292 | * | ||
293 | * Given a buffer of IEs we move all Host Settable IEs to front of buffer | ||
294 | * by overwriting the IEs that are not Host Settable. | ||
295 | * Buffer length is adjusted accordingly. | ||
296 | */ | ||
297 | static | ||
298 | ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev, | ||
299 | void *ie_data, size_t size) | ||
300 | { | ||
301 | size_t new_len = size; | ||
302 | struct uwb_ie_hdr *ie_hdr; | ||
303 | size_t ie_length; | ||
304 | void *itr = ie_data, *top = itr + size; | ||
305 | |||
306 | while (itr < top) { | ||
307 | if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0) | ||
308 | break; | ||
309 | ie_hdr = itr; | ||
310 | ie_length = sizeof(*ie_hdr) + ie_hdr->length; | ||
311 | if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) { | ||
312 | itr += ie_length; | ||
313 | } else { | ||
314 | memmove(itr, itr + ie_length, top - (itr + ie_length)); | ||
315 | new_len -= ie_length; | ||
316 | top -= ie_length; | ||
317 | } | ||
318 | } | ||
319 | return new_len; | ||
320 | } | ||
321 | |||
322 | |||
323 | /* Cleanup the whole IE management subsystem */ | 181 | /* Cleanup the whole IE management subsystem */ |
324 | void uwb_rc_ie_init(struct uwb_rc *uwb_rc) | 182 | void uwb_rc_ie_init(struct uwb_rc *uwb_rc) |
325 | { | 183 | { |
@@ -328,49 +186,34 @@ void uwb_rc_ie_init(struct uwb_rc *uwb_rc) | |||
328 | 186 | ||
329 | 187 | ||
330 | /** | 188 | /** |
331 | * Set up cache for host settable IEs currently being transmitted | 189 | * uwb_rc_ie_setup - setup a radio controller's IE manager |
190 | * @uwb_rc: the radio controller. | ||
332 | * | 191 | * |
333 | * First we just call GET-IE to get the current IEs being transmitted | 192 | * The current set of IEs are obtained from the hardware with a GET-IE |
334 | * (or we workaround and pretend we did) and (because the format is | 193 | * command (since the radio controller is not yet beaconing this will |
335 | * the same) reuse that as the IE cache (with the command prefix, as | 194 | * be just the hardware's MAC and PHY Capability IEs). |
336 | * explained in 'struct uwb_rc'). | ||
337 | * | 195 | * |
338 | * @returns: size of cache created | 196 | * Returns 0 on success; -ve on an error. |
339 | */ | 197 | */ |
340 | ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc) | 198 | int uwb_rc_ie_setup(struct uwb_rc *uwb_rc) |
341 | { | 199 | { |
342 | struct device *dev = &uwb_rc->uwb_dev.dev; | 200 | struct uwb_rc_evt_get_ie *ie_info = NULL; |
343 | ssize_t result; | 201 | int capacity; |
344 | size_t capacity; | 202 | |
345 | struct uwb_rc_evt_get_ie *ie_info; | 203 | capacity = uwb_rc_get_ie(uwb_rc, &ie_info); |
204 | if (capacity < 0) | ||
205 | return capacity; | ||
346 | 206 | ||
347 | d_fnstart(3, dev, "(%p)\n", uwb_rc); | ||
348 | mutex_lock(&uwb_rc->ies_mutex); | 207 | mutex_lock(&uwb_rc->ies_mutex); |
349 | result = uwb_rc_get_ie(uwb_rc, &ie_info); | 208 | |
350 | if (result < 0) | 209 | uwb_rc->ies = (struct uwb_rc_cmd_set_ie *)ie_info; |
351 | goto error_get_ie; | ||
352 | capacity = result; | ||
353 | d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result, | ||
354 | (size_t)le16_to_cpu(ie_info->wIELength), ie_info); | ||
355 | |||
356 | /* Remove IEs that host should not set. */ | ||
357 | result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev, | ||
358 | ie_info->IEData, le16_to_cpu(ie_info->wIELength)); | ||
359 | if (result < 0) | ||
360 | goto error_parse; | ||
361 | d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result); | ||
362 | uwb_rc->ies = (void *) ie_info; | ||
363 | uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; | 210 | uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; |
364 | uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); | 211 | uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); |
365 | uwb_rc->ies_capacity = capacity; | 212 | uwb_rc->ies_capacity = capacity; |
366 | d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n", | 213 | |
367 | ie_info, result, capacity); | ||
368 | result = 0; | ||
369 | error_parse: | ||
370 | error_get_ie: | ||
371 | mutex_unlock(&uwb_rc->ies_mutex); | 214 | mutex_unlock(&uwb_rc->ies_mutex); |
372 | d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result); | 215 | |
373 | return result; | 216 | return 0; |
374 | } | 217 | } |
375 | 218 | ||
376 | 219 | ||
@@ -383,26 +226,47 @@ void uwb_rc_ie_release(struct uwb_rc *uwb_rc) | |||
383 | } | 226 | } |
384 | 227 | ||
385 | 228 | ||
386 | static | 229 | static int uwb_rc_ie_add_one(struct uwb_rc *rc, const struct uwb_ie_hdr *new_ie) |
387 | int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | ||
388 | size_t offset, void *_ctx) | ||
389 | { | 230 | { |
390 | size_t *acc_size = _ctx; | 231 | struct uwb_rc_cmd_set_ie *new_ies; |
391 | *acc_size += sizeof(*ie_hdr) + ie_hdr->length; | 232 | void *ptr, *prev_ie; |
392 | d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size); | 233 | struct uwb_ie_hdr *ie; |
234 | size_t length, new_ie_len, new_capacity, size, prev_size; | ||
235 | |||
236 | length = le16_to_cpu(rc->ies->wIELength); | ||
237 | new_ie_len = sizeof(struct uwb_ie_hdr) + new_ie->length; | ||
238 | new_capacity = sizeof(struct uwb_rc_cmd_set_ie) + length + new_ie_len; | ||
239 | |||
240 | if (new_capacity > rc->ies_capacity) { | ||
241 | new_ies = krealloc(rc->ies, new_capacity, GFP_KERNEL); | ||
242 | if (!new_ies) | ||
243 | return -ENOMEM; | ||
244 | rc->ies = new_ies; | ||
245 | } | ||
246 | |||
247 | ptr = rc->ies->IEData; | ||
248 | size = length; | ||
249 | for (;;) { | ||
250 | prev_ie = ptr; | ||
251 | prev_size = size; | ||
252 | ie = uwb_ie_next(&ptr, &size); | ||
253 | if (!ie || ie->element_id > new_ie->element_id) | ||
254 | break; | ||
255 | } | ||
256 | |||
257 | memmove(prev_ie + new_ie_len, prev_ie, prev_size); | ||
258 | memcpy(prev_ie, new_ie, new_ie_len); | ||
259 | rc->ies->wIELength = cpu_to_le16(length + new_ie_len); | ||
260 | |||
393 | return 0; | 261 | return 0; |
394 | } | 262 | } |
395 | 263 | ||
396 | |||
397 | /** | 264 | /** |
398 | * Add a new IE to IEs currently being transmitted by device | 265 | * uwb_rc_ie_add - add new IEs to the radio controller's beacon |
399 | * | 266 | * @uwb_rc: the radio controller. |
400 | * @ies: the buffer containing the new IE or IEs to be added to | 267 | * @ies: the buffer containing the new IE or IEs to be added to |
401 | * the device's beacon. The buffer will be verified for | 268 | * the device's beacon. |
402 | * consistence (meaning the headers should be right) and | 269 | * @size: length of all the IEs. |
403 | * consistent with the buffer size. | ||
404 | * @size: size of @ies (in bytes, total buffer size) | ||
405 | * @returns: 0 if ok, <0 errno code on error | ||
406 | * | 270 | * |
407 | * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB | 271 | * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB |
408 | * after the device sent the first beacon that includes the IEs specified | 272 | * after the device sent the first beacon that includes the IEs specified |
@@ -411,66 +275,40 @@ int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | |||
411 | * we start beaconing. | 275 | * we start beaconing. |
412 | * | 276 | * |
413 | * Setting an IE on the device will overwrite all current IEs in device. So | 277 | * Setting an IE on the device will overwrite all current IEs in device. So |
414 | * we take the current IEs being transmitted by the device, append the | 278 | * we take the current IEs being transmitted by the device, insert the |
415 | * new one, and call SET IE with all the IEs needed. | 279 | * new one, and call SET IE with all the IEs needed. |
416 | * | 280 | * |
417 | * The local IE cache will only be updated with the new IE if SET IE | 281 | * Returns 0 on success; or -ENOMEM. |
418 | * completed successfully. | ||
419 | */ | 282 | */ |
420 | int uwb_rc_ie_add(struct uwb_rc *uwb_rc, | 283 | int uwb_rc_ie_add(struct uwb_rc *uwb_rc, |
421 | const struct uwb_ie_hdr *ies, size_t size) | 284 | const struct uwb_ie_hdr *ies, size_t size) |
422 | { | 285 | { |
423 | int result = 0; | 286 | int result = 0; |
424 | struct device *dev = &uwb_rc->uwb_dev.dev; | 287 | void *ptr; |
425 | struct uwb_rc_cmd_set_ie *new_ies; | 288 | const struct uwb_ie_hdr *ie; |
426 | size_t ies_size, total_size, acc_size = 0; | 289 | |
427 | |||
428 | if (uwb_rc->ies == NULL) | ||
429 | return -ESHUTDOWN; | ||
430 | uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size); | ||
431 | if (acc_size != size) { | ||
432 | dev_err(dev, "BUG: bad IEs, misconstructed headers " | ||
433 | "[%zu bytes reported vs %zu calculated]\n", | ||
434 | size, acc_size); | ||
435 | WARN_ON(1); | ||
436 | return -EINVAL; | ||
437 | } | ||
438 | mutex_lock(&uwb_rc->ies_mutex); | 290 | mutex_lock(&uwb_rc->ies_mutex); |
439 | ies_size = le16_to_cpu(uwb_rc->ies->wIELength); | 291 | |
440 | total_size = sizeof(*uwb_rc->ies) + ies_size; | 292 | ptr = (void *)ies; |
441 | if (total_size + size > uwb_rc->ies_capacity) { | 293 | for (;;) { |
442 | d_printf(4, dev, "Reallocating IE cache from %p capacity %zu " | 294 | ie = uwb_ie_next(&ptr, &size); |
443 | "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity, | 295 | if (!ie) |
444 | total_size + size); | 296 | break; |
445 | new_ies = kzalloc(total_size + size, GFP_KERNEL); | 297 | |
446 | if (new_ies == NULL) { | 298 | result = uwb_rc_ie_add_one(uwb_rc, ie); |
447 | dev_err(dev, "No memory for adding new IE\n"); | 299 | if (result < 0) |
448 | result = -ENOMEM; | 300 | break; |
449 | goto error_alloc; | ||
450 | } | ||
451 | memcpy(new_ies, uwb_rc->ies, total_size); | ||
452 | uwb_rc->ies_capacity = total_size + size; | ||
453 | kfree(uwb_rc->ies); | ||
454 | uwb_rc->ies = new_ies; | ||
455 | d_printf(4, dev, "New IE cache at %p capacity %zu\n", | ||
456 | uwb_rc->ies, uwb_rc->ies_capacity); | ||
457 | } | 301 | } |
458 | memcpy((void *)uwb_rc->ies + total_size, ies, size); | 302 | if (result >= 0) { |
459 | uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size); | 303 | if (size == 0) { |
460 | if (uwb_rc->beaconing != -1) { | 304 | if (uwb_rc->beaconing != -1) |
461 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); | 305 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); |
462 | if (result < 0) { | ||
463 | dev_err(dev, "Cannot set new IE on device: %d\n", | ||
464 | result); | ||
465 | uwb_rc->ies->wIELength = cpu_to_le16(ies_size); | ||
466 | } else | 306 | } else |
467 | result = 0; | 307 | result = -EINVAL; |
468 | } | 308 | } |
469 | d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n", | 309 | |
470 | le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity, | ||
471 | uwb_rc->ies); | ||
472 | error_alloc: | ||
473 | mutex_unlock(&uwb_rc->ies_mutex); | 310 | mutex_unlock(&uwb_rc->ies_mutex); |
311 | |||
474 | return result; | 312 | return result; |
475 | } | 313 | } |
476 | EXPORT_SYMBOL_GPL(uwb_rc_ie_add); | 314 | EXPORT_SYMBOL_GPL(uwb_rc_ie_add); |
@@ -489,53 +327,52 @@ EXPORT_SYMBOL_GPL(uwb_rc_ie_add); | |||
489 | * beacon. We don't reallocate, we just mark the size smaller. | 327 | * beacon. We don't reallocate, we just mark the size smaller. |
490 | */ | 328 | */ |
491 | static | 329 | static |
492 | int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) | 330 | void uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) |
493 | { | 331 | { |
494 | struct uwb_ie_hdr *ie_hdr; | 332 | struct uwb_ie_hdr *ie; |
495 | size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength); | 333 | size_t len = le16_to_cpu(uwb_rc->ies->wIELength); |
496 | void *itr = uwb_rc->ies->IEData; | 334 | void *ptr; |
497 | void *top = itr + new_len; | 335 | size_t size; |
498 | 336 | ||
499 | while (itr < top) { | 337 | ptr = uwb_rc->ies->IEData; |
500 | ie_hdr = itr; | 338 | size = len; |
501 | if (ie_hdr->element_id != to_remove) { | 339 | for (;;) { |
502 | itr += sizeof(*ie_hdr) + ie_hdr->length; | 340 | ie = uwb_ie_next(&ptr, &size); |
503 | } else { | 341 | if (!ie) |
504 | int ie_length; | 342 | break; |
505 | ie_length = sizeof(*ie_hdr) + ie_hdr->length; | 343 | if (ie->element_id == to_remove) { |
506 | if (top - itr != ie_length) | 344 | len -= sizeof(struct uwb_ie_hdr) + ie->length; |
507 | memmove(itr, itr + ie_length, top - itr + ie_length); | 345 | memmove(ie, ptr, size); |
508 | top -= ie_length; | 346 | ptr = ie; |
509 | new_len -= ie_length; | ||
510 | } | 347 | } |
511 | } | 348 | } |
512 | uwb_rc->ies->wIELength = cpu_to_le16(new_len); | 349 | uwb_rc->ies->wIELength = cpu_to_le16(len); |
513 | return 0; | ||
514 | } | 350 | } |
515 | 351 | ||
516 | 352 | ||
517 | /** | 353 | /** |
518 | * Remove an IE currently being transmitted by device | 354 | * uwb_rc_ie_rm - remove an IE from the radio controller's beacon |
355 | * @uwb_rc: the radio controller. | ||
356 | * @element_id: the element ID of the IE to remove. | ||
519 | * | 357 | * |
520 | * @element_id: id of IE to be removed from device's beacon | 358 | * Only IEs previously added with uwb_rc_ie_add() may be removed. |
359 | * | ||
360 | * Returns 0 on success; or -ve if the SET-IE command to the radio | ||
361 | * controller failed. | ||
521 | */ | 362 | */ |
522 | int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) | 363 | int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) |
523 | { | 364 | { |
524 | struct device *dev = &uwb_rc->uwb_dev.dev; | 365 | int result = 0; |
525 | int result; | ||
526 | 366 | ||
527 | if (uwb_rc->ies == NULL) | ||
528 | return -ESHUTDOWN; | ||
529 | mutex_lock(&uwb_rc->ies_mutex); | 367 | mutex_lock(&uwb_rc->ies_mutex); |
530 | result = uwb_rc_ie_cache_rm(uwb_rc, element_id); | 368 | |
531 | if (result < 0) | 369 | uwb_rc_ie_cache_rm(uwb_rc, element_id); |
532 | dev_err(dev, "Cannot remove IE from cache.\n"); | 370 | |
533 | if (uwb_rc->beaconing != -1) { | 371 | if (uwb_rc->beaconing != -1) |
534 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); | 372 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); |
535 | if (result < 0) | 373 | |
536 | dev_err(dev, "Cannot set new IE on device.\n"); | ||
537 | } | ||
538 | mutex_unlock(&uwb_rc->ies_mutex); | 374 | mutex_unlock(&uwb_rc->ies_mutex); |
375 | |||
539 | return result; | 376 | return result; |
540 | } | 377 | } |
541 | EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); | 378 | EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); |
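After this rework, callers hand uwb_rc_ie_add() a buffer of complete IEs (header immediately followed by payload) and remove them again by element ID with uwb_rc_ie_rm(). A hedged usage sketch based on the signatures visible above; the element ID choice and payload bytes are made up for illustration:

#include <linux/kernel.h>
#include <linux/uwb.h>

/* Hedged usage sketch; UWB_APP_SPEC_IE and the payload are illustrative. */
static int example_set_app_spec_ie(struct uwb_rc *rc)
{
        struct {
                struct uwb_ie_hdr hdr;
                u8 data[4];
        } __packed ie = {
                .hdr.element_id = UWB_APP_SPEC_IE,
                .hdr.length     = sizeof(ie.data),
                .data           = { 0xde, 0xad, 0xbe, 0xef },
        };

        return uwb_rc_ie_add(rc, &ie.hdr, sizeof(ie));
}

static int example_clear_app_spec_ie(struct uwb_rc *rc)
{
        return uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
}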
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c index 15f856c9689a..e9fe1bb7eb23 100644 --- a/drivers/uwb/lc-dev.c +++ b/drivers/uwb/lc-dev.c | |||
@@ -22,7 +22,6 @@ | |||
22 | * | 22 | * |
23 | * FIXME: docs | 23 | * FIXME: docs |
24 | */ | 24 | */ |
25 | |||
26 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
27 | #include <linux/device.h> | 26 | #include <linux/device.h> |
28 | #include <linux/err.h> | 27 | #include <linux/err.h> |
@@ -30,10 +29,6 @@ | |||
30 | #include <linux/random.h> | 29 | #include <linux/random.h> |
31 | #include "uwb-internal.h" | 30 | #include "uwb-internal.h" |
32 | 31 | ||
33 | #define D_LOCAL 1 | ||
34 | #include <linux/uwb/debug.h> | ||
35 | |||
36 | |||
37 | /* We initialize addresses to 0xff (invalid, as it is bcast) */ | 32 | /* We initialize addresses to 0xff (invalid, as it is bcast) */ |
38 | static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) | 33 | static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) |
39 | { | 34 | { |
@@ -104,12 +99,9 @@ static void uwb_dev_sys_release(struct device *dev) | |||
104 | { | 99 | { |
105 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | 100 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); |
106 | 101 | ||
107 | d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev); | ||
108 | uwb_bce_put(uwb_dev->bce); | 102 | uwb_bce_put(uwb_dev->bce); |
109 | d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev); | ||
110 | memset(uwb_dev, 0x69, sizeof(*uwb_dev)); | 103 | memset(uwb_dev, 0x69, sizeof(*uwb_dev)); |
111 | kfree(uwb_dev); | 104 | kfree(uwb_dev); |
112 | d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev); | ||
113 | } | 105 | } |
114 | 106 | ||
115 | /* | 107 | /* |
@@ -275,12 +267,8 @@ static struct attribute_group *groups[] = { | |||
275 | */ | 267 | */ |
276 | static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) | 268 | static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) |
277 | { | 269 | { |
278 | int result; | ||
279 | struct device *dev; | 270 | struct device *dev; |
280 | 271 | ||
281 | d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev); | ||
282 | BUG_ON(parent_dev == NULL); | ||
283 | |||
284 | dev = &uwb_dev->dev; | 272 | dev = &uwb_dev->dev; |
285 | /* Device sysfs files are only useful for neighbor devices not | 273 | /* Device sysfs files are only useful for neighbor devices not |
286 | local radio controllers. */ | 274 | local radio controllers. */ |
@@ -289,18 +277,14 @@ static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) | |||
289 | dev->parent = parent_dev; | 277 | dev->parent = parent_dev; |
290 | dev_set_drvdata(dev, uwb_dev); | 278 | dev_set_drvdata(dev, uwb_dev); |
291 | 279 | ||
292 | result = device_add(dev); | 280 | return device_add(dev); |
293 | d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result); | ||
294 | return result; | ||
295 | } | 281 | } |
296 | 282 | ||
297 | 283 | ||
298 | static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) | 284 | static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) |
299 | { | 285 | { |
300 | d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev); | ||
301 | dev_set_drvdata(&uwb_dev->dev, NULL); | 286 | dev_set_drvdata(&uwb_dev->dev, NULL); |
302 | device_del(&uwb_dev->dev); | 287 | device_del(&uwb_dev->dev); |
303 | d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev); | ||
304 | } | 288 | } |
305 | 289 | ||
306 | 290 | ||
@@ -384,7 +368,6 @@ int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc) | |||
384 | struct device *dev = &uwb_dev->dev; | 368 | struct device *dev = &uwb_dev->dev; |
385 | char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; | 369 | char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; |
386 | 370 | ||
387 | d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc); | ||
388 | uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); | 371 | uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); |
389 | uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); | 372 | uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); |
390 | dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", | 373 | dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", |
@@ -392,8 +375,10 @@ int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc) | |||
392 | rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", | 375 | rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", |
393 | rc ? dev_name(rc->uwb_dev.dev.parent) : ""); | 376 | rc ? dev_name(rc->uwb_dev.dev.parent) : ""); |
394 | uwb_dev_rm(uwb_dev); | 377 | uwb_dev_rm(uwb_dev); |
378 | list_del(&uwb_dev->bce->node); | ||
379 | uwb_bce_put(uwb_dev->bce); | ||
395 | uwb_dev_put(uwb_dev); /* for the creation in _onair() */ | 380 | uwb_dev_put(uwb_dev); /* for the creation in _onair() */ |
396 | d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc); | 381 | |
397 | return 0; | 382 | return 0; |
398 | } | 383 | } |
399 | 384 | ||
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c index ee5772f00d42..9cf21e6bb624 100644 --- a/drivers/uwb/lc-rc.c +++ b/drivers/uwb/lc-rc.c | |||
@@ -36,8 +36,6 @@ | |||
36 | #include <linux/etherdevice.h> | 36 | #include <linux/etherdevice.h> |
37 | #include <linux/usb.h> | 37 | #include <linux/usb.h> |
38 | 38 | ||
39 | #define D_LOCAL 1 | ||
40 | #include <linux/uwb/debug.h> | ||
41 | #include "uwb-internal.h" | 39 | #include "uwb-internal.h" |
42 | 40 | ||
43 | static int uwb_rc_index_match(struct device *dev, void *data) | 41 | static int uwb_rc_index_match(struct device *dev, void *data) |
@@ -81,9 +79,7 @@ static void uwb_rc_sys_release(struct device *dev) | |||
81 | struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); | 79 | struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); |
82 | struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); | 80 | struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); |
83 | 81 | ||
84 | uwb_rc_neh_destroy(rc); | ||
85 | uwb_rc_ie_release(rc); | 82 | uwb_rc_ie_release(rc); |
86 | d_printf(1, dev, "freed uwb_rc %p\n", rc); | ||
87 | kfree(rc); | 83 | kfree(rc); |
88 | } | 84 | } |
89 | 85 | ||
@@ -100,6 +96,8 @@ void uwb_rc_init(struct uwb_rc *rc) | |||
100 | rc->scan_type = UWB_SCAN_DISABLED; | 96 | rc->scan_type = UWB_SCAN_DISABLED; |
101 | INIT_LIST_HEAD(&rc->notifs_chain.list); | 97 | INIT_LIST_HEAD(&rc->notifs_chain.list); |
102 | mutex_init(&rc->notifs_chain.mutex); | 98 | mutex_init(&rc->notifs_chain.mutex); |
99 | INIT_LIST_HEAD(&rc->uwb_beca.list); | ||
100 | mutex_init(&rc->uwb_beca.mutex); | ||
103 | uwb_drp_avail_init(rc); | 101 | uwb_drp_avail_init(rc); |
104 | uwb_rc_ie_init(rc); | 102 | uwb_rc_ie_init(rc); |
105 | uwb_rsv_init(rc); | 103 | uwb_rsv_init(rc); |
@@ -191,9 +189,9 @@ static int uwb_rc_setup(struct uwb_rc *rc) | |||
191 | int result; | 189 | int result; |
192 | struct device *dev = &rc->uwb_dev.dev; | 190 | struct device *dev = &rc->uwb_dev.dev; |
193 | 191 | ||
194 | result = uwb_rc_reset(rc); | 192 | result = uwb_radio_setup(rc); |
195 | if (result < 0) { | 193 | if (result < 0) { |
196 | dev_err(dev, "cannot reset UWB radio: %d\n", result); | 194 | dev_err(dev, "cannot setup UWB radio: %d\n", result); |
197 | goto error; | 195 | goto error; |
198 | } | 196 | } |
199 | result = uwb_rc_mac_addr_setup(rc); | 197 | result = uwb_rc_mac_addr_setup(rc); |
@@ -250,6 +248,12 @@ int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv) | |||
250 | 248 | ||
251 | rc->priv = priv; | 249 | rc->priv = priv; |
252 | 250 | ||
251 | init_waitqueue_head(&rc->uwbd.wq); | ||
252 | INIT_LIST_HEAD(&rc->uwbd.event_list); | ||
253 | spin_lock_init(&rc->uwbd.event_list_lock); | ||
254 | |||
255 | uwbd_start(rc); | ||
256 | |||
253 | result = rc->start(rc); | 257 | result = rc->start(rc); |
254 | if (result < 0) | 258 | if (result < 0) |
255 | goto error_rc_start; | 259 | goto error_rc_start; |
@@ -284,7 +288,7 @@ error_sys_add: | |||
284 | error_dev_add: | 288 | error_dev_add: |
285 | error_rc_setup: | 289 | error_rc_setup: |
286 | rc->stop(rc); | 290 | rc->stop(rc); |
287 | uwbd_flush(rc); | 291 | uwbd_stop(rc); |
288 | error_rc_start: | 292 | error_rc_start: |
289 | return result; | 293 | return result; |
290 | } | 294 | } |
@@ -306,25 +310,24 @@ void uwb_rc_rm(struct uwb_rc *rc) | |||
306 | rc->ready = 0; | 310 | rc->ready = 0; |
307 | 311 | ||
308 | uwb_dbg_del_rc(rc); | 312 | uwb_dbg_del_rc(rc); |
309 | uwb_rsv_cleanup(rc); | 313 | uwb_rsv_remove_all(rc); |
310 | uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE); | 314 | uwb_radio_shutdown(rc); |
311 | if (rc->beaconing >= 0) | ||
312 | uwb_rc_beacon(rc, -1, 0); | ||
313 | if (rc->scan_type != UWB_SCAN_DISABLED) | ||
314 | uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0); | ||
315 | uwb_rc_reset(rc); | ||
316 | 315 | ||
317 | rc->stop(rc); | 316 | rc->stop(rc); |
318 | uwbd_flush(rc); | 317 | |
318 | uwbd_stop(rc); | ||
319 | uwb_rc_neh_destroy(rc); | ||
319 | 320 | ||
320 | uwb_dev_lock(&rc->uwb_dev); | 321 | uwb_dev_lock(&rc->uwb_dev); |
321 | rc->priv = NULL; | 322 | rc->priv = NULL; |
322 | rc->cmd = NULL; | 323 | rc->cmd = NULL; |
323 | uwb_dev_unlock(&rc->uwb_dev); | 324 | uwb_dev_unlock(&rc->uwb_dev); |
324 | mutex_lock(&uwb_beca.mutex); | 325 | mutex_lock(&rc->uwb_beca.mutex); |
325 | uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); | 326 | uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); |
326 | __uwb_rc_sys_rm(rc); | 327 | __uwb_rc_sys_rm(rc); |
327 | mutex_unlock(&uwb_beca.mutex); | 328 | mutex_unlock(&rc->uwb_beca.mutex); |
329 | uwb_rsv_cleanup(rc); | ||
330 | uwb_beca_release(rc); | ||
328 | uwb_dev_rm(&rc->uwb_dev); | 331 | uwb_dev_rm(&rc->uwb_dev); |
329 | } | 332 | } |
330 | EXPORT_SYMBOL_GPL(uwb_rc_rm); | 333 | EXPORT_SYMBOL_GPL(uwb_rc_rm); |
@@ -468,28 +471,3 @@ void uwb_rc_put(struct uwb_rc *rc) | |||
468 | __uwb_rc_put(rc); | 471 | __uwb_rc_put(rc); |
469 | } | 472 | } |
470 | EXPORT_SYMBOL_GPL(uwb_rc_put); | 473 | EXPORT_SYMBOL_GPL(uwb_rc_put); |
471 | |||
472 | /* | ||
473 | * | ||
474 | * | ||
475 | */ | ||
476 | ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size) | ||
477 | { | ||
478 | ssize_t result; | ||
479 | struct uwb_rc_evt_get_ie *ie_info; | ||
480 | struct uwb_buf_ctx ctx; | ||
481 | |||
482 | result = uwb_rc_get_ie(uwb_rc, &ie_info); | ||
483 | if (result < 0) | ||
484 | goto error_get_ie; | ||
485 | ctx.buf = buf; | ||
486 | ctx.size = size; | ||
487 | ctx.bytes = 0; | ||
488 | uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx, | ||
489 | ie_info->IEData, result - sizeof(*ie_info)); | ||
490 | result = ctx.bytes; | ||
491 | kfree(ie_info); | ||
492 | error_get_ie: | ||
493 | return result; | ||
494 | } | ||
495 | |||
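The lc-rc.c changes make the event thread and the beacon cache per radio controller, which fixes a strict setup/teardown order: the uwbd thread must be running before the hardware is started, and it may only be stopped (and the pending event handles destroyed) after the hardware is stopped. A condensed sketch of that pairing; error handling is omitted and the function names are illustrative:

#include "uwb-internal.h"

static int example_rc_add(struct uwb_rc *rc)
{
        uwbd_start(rc);           /* per-RC event thread before the HW runs */
        return rc->start(rc);     /* HW may now generate events */
}

static void example_rc_rm(struct uwb_rc *rc)
{
        rc->stop(rc);             /* no new events from the HW */
        uwbd_stop(rc);            /* stop the per-RC event thread */
        uwb_rc_neh_destroy(rc);   /* then drop any pending event handles */
}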
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c index 9b4eb64327ac..0af8916d9bef 100644 --- a/drivers/uwb/neh.c +++ b/drivers/uwb/neh.c | |||
@@ -86,8 +86,6 @@ | |||
86 | #include <linux/err.h> | 86 | #include <linux/err.h> |
87 | 87 | ||
88 | #include "uwb-internal.h" | 88 | #include "uwb-internal.h" |
89 | #define D_LOCAL 0 | ||
90 | #include <linux/uwb/debug.h> | ||
91 | 89 | ||
92 | /* | 90 | /* |
93 | * UWB Radio Controller Notification/Event Handle | 91 | * UWB Radio Controller Notification/Event Handle |
@@ -254,7 +252,6 @@ error_kzalloc: | |||
254 | 252 | ||
255 | static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) | 253 | static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) |
256 | { | 254 | { |
257 | del_timer(&neh->timer); | ||
258 | __uwb_rc_ctx_put(rc, neh); | 255 | __uwb_rc_ctx_put(rc, neh); |
259 | list_del(&neh->list_node); | 256 | list_del(&neh->list_node); |
260 | } | 257 | } |
@@ -275,6 +272,7 @@ void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) | |||
275 | __uwb_rc_neh_rm(rc, neh); | 272 | __uwb_rc_neh_rm(rc, neh); |
276 | spin_unlock_irqrestore(&rc->neh_lock, flags); | 273 | spin_unlock_irqrestore(&rc->neh_lock, flags); |
277 | 274 | ||
275 | del_timer_sync(&neh->timer); | ||
278 | uwb_rc_neh_put(neh); | 276 | uwb_rc_neh_put(neh); |
279 | } | 277 | } |
280 | 278 | ||
@@ -349,7 +347,7 @@ struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc, | |||
349 | } | 347 | } |
350 | 348 | ||
351 | 349 | ||
352 | /** | 350 | /* |
353 | * Process notifications coming from the radio control interface | 351 | * Process notifications coming from the radio control interface |
354 | * | 352 | * |
355 | * @rc: UWB Radio Control Interface descriptor | 353 | * @rc: UWB Radio Control Interface descriptor |
@@ -401,23 +399,6 @@ void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size) | |||
401 | uwb_evt->notif.size = size; | 399 | uwb_evt->notif.size = size; |
402 | uwb_evt->notif.rceb = rceb; | 400 | uwb_evt->notif.rceb = rceb; |
403 | 401 | ||
404 | switch (le16_to_cpu(rceb->wEvent)) { | ||
405 | /* Trap some vendor specific events | ||
406 | * | ||
407 | * FIXME: move this to handling in ptc-est, where we | ||
408 | * register a NULL event handler for these two guys | ||
409 | * using the Intel IDs. | ||
410 | */ | ||
411 | case 0x0103: | ||
412 | dev_info(dev, "FIXME: DEVICE ADD\n"); | ||
413 | return; | ||
414 | case 0x0104: | ||
415 | dev_info(dev, "FIXME: DEVICE RM\n"); | ||
416 | return; | ||
417 | default: | ||
418 | break; | ||
419 | } | ||
420 | |||
421 | uwbd_event_queue(uwb_evt); | 402 | uwbd_event_queue(uwb_evt); |
422 | } | 403 | } |
423 | 404 | ||
@@ -438,9 +419,10 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size | |||
438 | rceb->bEventContext, size); | 419 | rceb->bEventContext, size); |
439 | } else { | 420 | } else { |
440 | neh = uwb_rc_neh_lookup(rc, rceb); | 421 | neh = uwb_rc_neh_lookup(rc, rceb); |
441 | if (neh) | 422 | if (neh) { |
423 | del_timer_sync(&neh->timer); | ||
442 | uwb_rc_neh_cb(neh, rceb, size); | 424 | uwb_rc_neh_cb(neh, rceb, size); |
443 | else | 425 | } else |
444 | dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", | 426 | dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", |
445 | rceb->bEventType, le16_to_cpu(rceb->wEvent), | 427 | rceb->bEventType, le16_to_cpu(rceb->wEvent), |
446 | rceb->bEventContext, size); | 428 | rceb->bEventContext, size); |
@@ -495,8 +477,6 @@ void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size) | |||
495 | size_t size, real_size, event_size; | 477 | size_t size, real_size, event_size; |
496 | int needtofree; | 478 | int needtofree; |
497 | 479 | ||
498 | d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size); | ||
499 | d_printf(2, dev, "groking event block: %zu bytes\n", buf_size); | ||
500 | itr = buf; | 480 | itr = buf; |
501 | size = buf_size; | 481 | size = buf_size; |
502 | while (size > 0) { | 482 | while (size > 0) { |
@@ -544,10 +524,7 @@ void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size) | |||
544 | 524 | ||
545 | itr += real_size; | 525 | itr += real_size; |
546 | size -= real_size; | 526 | size -= real_size; |
547 | d_printf(2, dev, "consumed %zd bytes, %zu left\n", | ||
548 | event_size, size); | ||
549 | } | 527 | } |
550 | d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size); | ||
551 | } | 528 | } |
552 | EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); | 529 | EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); |
553 | 530 | ||
@@ -562,16 +539,22 @@ EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); | |||
562 | */ | 539 | */ |
563 | void uwb_rc_neh_error(struct uwb_rc *rc, int error) | 540 | void uwb_rc_neh_error(struct uwb_rc *rc, int error) |
564 | { | 541 | { |
565 | struct uwb_rc_neh *neh, *next; | 542 | struct uwb_rc_neh *neh; |
566 | unsigned long flags; | 543 | unsigned long flags; |
567 | 544 | ||
568 | BUG_ON(error >= 0); | 545 | for (;;) { |
569 | spin_lock_irqsave(&rc->neh_lock, flags); | 546 | spin_lock_irqsave(&rc->neh_lock, flags); |
570 | list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { | 547 | if (list_empty(&rc->neh_list)) { |
548 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
549 | break; | ||
550 | } | ||
551 | neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); | ||
571 | __uwb_rc_neh_rm(rc, neh); | 552 | __uwb_rc_neh_rm(rc, neh); |
553 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
554 | |||
555 | del_timer_sync(&neh->timer); | ||
572 | uwb_rc_neh_cb(neh, NULL, error); | 556 | uwb_rc_neh_cb(neh, NULL, error); |
573 | } | 557 | } |
574 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
575 | } | 558 | } |
576 | EXPORT_SYMBOL_GPL(uwb_rc_neh_error); | 559 | EXPORT_SYMBOL_GPL(uwb_rc_neh_error); |
577 | 560 | ||
@@ -583,10 +566,14 @@ static void uwb_rc_neh_timer(unsigned long arg) | |||
583 | unsigned long flags; | 566 | unsigned long flags; |
584 | 567 | ||
585 | spin_lock_irqsave(&rc->neh_lock, flags); | 568 | spin_lock_irqsave(&rc->neh_lock, flags); |
586 | __uwb_rc_neh_rm(rc, neh); | 569 | if (neh->context) |
570 | __uwb_rc_neh_rm(rc, neh); | ||
571 | else | ||
572 | neh = NULL; | ||
587 | spin_unlock_irqrestore(&rc->neh_lock, flags); | 573 | spin_unlock_irqrestore(&rc->neh_lock, flags); |
588 | 574 | ||
589 | uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); | 575 | if (neh) |
576 | uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); | ||
590 | } | 577 | } |
591 | 578 | ||
592 | /** Initializes the @rc's neh subsystem | 579 | /** Initializes the @rc's neh subsystem |
@@ -605,12 +592,19 @@ void uwb_rc_neh_create(struct uwb_rc *rc) | |||
605 | void uwb_rc_neh_destroy(struct uwb_rc *rc) | 592 | void uwb_rc_neh_destroy(struct uwb_rc *rc) |
606 | { | 593 | { |
607 | unsigned long flags; | 594 | unsigned long flags; |
608 | struct uwb_rc_neh *neh, *next; | 595 | struct uwb_rc_neh *neh; |
609 | 596 | ||
610 | spin_lock_irqsave(&rc->neh_lock, flags); | 597 | for (;;) { |
611 | list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { | 598 | spin_lock_irqsave(&rc->neh_lock, flags); |
599 | if (list_empty(&rc->neh_list)) { | ||
600 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
601 | break; | ||
602 | } | ||
603 | neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); | ||
612 | __uwb_rc_neh_rm(rc, neh); | 604 | __uwb_rc_neh_rm(rc, neh); |
605 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
606 | |||
607 | del_timer_sync(&neh->timer); | ||
613 | uwb_rc_neh_put(neh); | 608 | uwb_rc_neh_put(neh); |
614 | } | 609 | } |
615 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
616 | } | 610 | } |
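The neh.c hunks above all follow one locking pattern: the timer is no longer deleted inside __uwb_rc_neh_rm() (which runs under rc->neh_lock), but with del_timer_sync() after the spinlock has been dropped, and the timer handler only acts if the handle is still live. Below is a minimal, illustrative sketch of that lock/timer ordering; the example_* names are invented for the example and are not part of the driver.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct example_item {
	struct list_head node;
	struct timer_list timer;
};

struct example_ctl {
	spinlock_t lock;
	struct list_head items;
};

/* Unlink under the lock; wait for the timer only after dropping it,
 * since a running timer handler may itself take ctl->lock. */
static void example_remove(struct example_ctl *ctl, struct example_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->lock, flags);
	list_del(&item->node);
	spin_unlock_irqrestore(&ctl->lock, flags);

	del_timer_sync(&item->timer);
}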
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c index 1afb38eacb9a..99a19c199095 100644 --- a/drivers/uwb/pal.c +++ b/drivers/uwb/pal.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/debugfs.h> | ||
19 | #include <linux/uwb.h> | 20 | #include <linux/uwb.h> |
20 | 21 | ||
21 | #include "uwb-internal.h" | 22 | #include "uwb-internal.h" |
@@ -32,13 +33,13 @@ EXPORT_SYMBOL_GPL(uwb_pal_init); | |||
32 | 33 | ||
33 | /** | 34 | /** |
34 | * uwb_pal_register - register a UWB PAL | 35 | * uwb_pal_register - register a UWB PAL |
35 | * @rc: the radio controller the PAL will be using | ||
36 | * @pal: the PAL | 36 | * @pal: the PAL |
37 | * | 37 | * |
38 | * The PAL must be initialized with uwb_pal_init(). | 38 | * The PAL must be initialized with uwb_pal_init(). |
39 | */ | 39 | */ |
40 | int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) | 40 | int uwb_pal_register(struct uwb_pal *pal) |
41 | { | 41 | { |
42 | struct uwb_rc *rc = pal->rc; | ||
42 | int ret; | 43 | int ret; |
43 | 44 | ||
44 | if (pal->device) { | 45 | if (pal->device) { |
@@ -54,9 +55,11 @@ int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) | |||
54 | } | 55 | } |
55 | } | 56 | } |
56 | 57 | ||
57 | spin_lock(&rc->pal_lock); | 58 | pal->debugfs_dir = uwb_dbg_create_pal_dir(pal); |
59 | |||
60 | mutex_lock(&rc->uwb_dev.mutex); | ||
58 | list_add(&pal->node, &rc->pals); | 61 | list_add(&pal->node, &rc->pals); |
59 | spin_unlock(&rc->pal_lock); | 62 | mutex_unlock(&rc->uwb_dev.mutex); |
60 | 63 | ||
61 | return 0; | 64 | return 0; |
62 | } | 65 | } |
@@ -64,14 +67,19 @@ EXPORT_SYMBOL_GPL(uwb_pal_register); | |||
64 | 67 | ||
65 | /** | 68 | /** |
66 | * uwb_pal_unregister - unregister a UWB PAL | 69 | * uwb_pal_unregister - unregister a UWB PAL |
67 | * @rc: the radio controller the PAL was using | ||
68 | * @pal: the PAL | 70 | * @pal: the PAL |
69 | */ | 71 | */ |
70 | void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal) | 72 | void uwb_pal_unregister(struct uwb_pal *pal) |
71 | { | 73 | { |
72 | spin_lock(&rc->pal_lock); | 74 | struct uwb_rc *rc = pal->rc; |
75 | |||
76 | uwb_radio_stop(pal); | ||
77 | |||
78 | mutex_lock(&rc->uwb_dev.mutex); | ||
73 | list_del(&pal->node); | 79 | list_del(&pal->node); |
74 | spin_unlock(&rc->pal_lock); | 80 | mutex_unlock(&rc->uwb_dev.mutex); |
81 | |||
82 | debugfs_remove(pal->debugfs_dir); | ||
75 | 83 | ||
76 | if (pal->device) { | 84 | if (pal->device) { |
77 | sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); | 85 | sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); |
@@ -86,6 +94,5 @@ EXPORT_SYMBOL_GPL(uwb_pal_unregister); | |||
86 | */ | 94 | */ |
87 | void uwb_rc_pal_init(struct uwb_rc *rc) | 95 | void uwb_rc_pal_init(struct uwb_rc *rc) |
88 | { | 96 | { |
89 | spin_lock_init(&rc->pal_lock); | ||
90 | INIT_LIST_HEAD(&rc->pals); | 97 | INIT_LIST_HEAD(&rc->pals); |
91 | } | 98 | } |
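With uwb_pal_register() and uwb_pal_unregister() now taking only the PAL, callers are expected to set pal->rc before registering. A hedged sketch of the new calling convention follows; the my_pal wrapper and the probe/remove names are assumptions for illustration, not code from this patch.

#include <linux/uwb.h>

struct my_pal {
	struct uwb_pal pal;
};

static int my_pal_probe(struct my_pal *mp, struct uwb_rc *rc)
{
	uwb_pal_init(&mp->pal);
	mp->pal.name = "my-pal";
	mp->pal.rc = rc;	/* previously passed as a separate argument */

	return uwb_pal_register(&mp->pal);
}

static void my_pal_remove(struct my_pal *mp)
{
	/* also stops the radio on this PAL's behalf, per the change above */
	uwb_pal_unregister(&mp->pal);
}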
diff --git a/drivers/uwb/radio.c b/drivers/uwb/radio.c new file mode 100644 index 000000000000..f0d55495f5e9 --- /dev/null +++ b/drivers/uwb/radio.c | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * UWB radio (channel) management. | ||
3 | * | ||
4 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/uwb.h> | ||
20 | |||
21 | #include "uwb-internal.h" | ||
22 | |||
23 | |||
24 | static int uwb_radio_select_channel(struct uwb_rc *rc) | ||
25 | { | ||
26 | /* | ||
27 | * Default to channel 9 (BG1, TFC1) unless the user has | ||
28 | * selected a specific channel or there are no active PALs. | ||
29 | */ | ||
30 | if (rc->active_pals == 0) | ||
31 | return -1; | ||
32 | if (rc->beaconing_forced) | ||
33 | return rc->beaconing_forced; | ||
34 | return 9; | ||
35 | } | ||
36 | |||
37 | |||
38 | /* | ||
39 | * Notify all active PALs that the channel has changed. | ||
40 | */ | ||
41 | static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel) | ||
42 | { | ||
43 | struct uwb_pal *pal; | ||
44 | |||
45 | list_for_each_entry(pal, &rc->pals, node) { | ||
46 | if (pal->channel && channel != pal->channel) { | ||
47 | pal->channel = channel; | ||
48 | if (pal->channel_changed) | ||
49 | pal->channel_changed(pal, pal->channel); | ||
50 | } | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Change to a new channel and notify any active PALs of the new | ||
56 | * channel. | ||
57 | * | ||
58 | * When stopping the radio, PALs need to be notified first so they can | ||
59 | * terminate any active reservations. | ||
60 | */ | ||
61 | static int uwb_radio_change_channel(struct uwb_rc *rc, int channel) | ||
62 | { | ||
63 | int ret = 0; | ||
64 | |||
65 | if (channel == -1) | ||
66 | uwb_radio_channel_changed(rc, channel); | ||
67 | |||
68 | if (channel != rc->beaconing) { | ||
69 | if (rc->beaconing != -1 && channel != -1) { | ||
70 | /* | ||
71 | * FIXME: should signal the channel change | ||
72 | * with a Channel Change IE. | ||
73 | */ | ||
74 | ret = uwb_radio_change_channel(rc, -1); | ||
75 | if (ret < 0) | ||
76 | return ret; | ||
77 | } | ||
78 | ret = uwb_rc_beacon(rc, channel, 0); | ||
79 | } | ||
80 | |||
81 | if (channel != -1) | ||
82 | uwb_radio_channel_changed(rc, rc->beaconing); | ||
83 | |||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * uwb_radio_start - request that the radio be started | ||
89 | * @pal: the PAL making the request. | ||
90 | * | ||
91 | * If the radio is not already active, a suitable channel is selected | ||
92 | * and beacons are started. | ||
93 | */ | ||
94 | int uwb_radio_start(struct uwb_pal *pal) | ||
95 | { | ||
96 | struct uwb_rc *rc = pal->rc; | ||
97 | int ret = 0; | ||
98 | |||
99 | mutex_lock(&rc->uwb_dev.mutex); | ||
100 | |||
101 | if (!pal->channel) { | ||
102 | pal->channel = -1; | ||
103 | rc->active_pals++; | ||
104 | ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); | ||
105 | } | ||
106 | |||
107 | mutex_unlock(&rc->uwb_dev.mutex); | ||
108 | return ret; | ||
109 | } | ||
110 | EXPORT_SYMBOL_GPL(uwb_radio_start); | ||
111 | |||
112 | /** | ||
113 | * uwb_radio_stop - request that the radio be stopped. | ||
114 | * @pal: the PAL making the request. | ||
115 | * | ||
116 | * Stops the radio if no other PAL is making use of it. | ||
117 | */ | ||
118 | void uwb_radio_stop(struct uwb_pal *pal) | ||
119 | { | ||
120 | struct uwb_rc *rc = pal->rc; | ||
121 | |||
122 | mutex_lock(&rc->uwb_dev.mutex); | ||
123 | |||
124 | if (pal->channel) { | ||
125 | rc->active_pals--; | ||
126 | uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); | ||
127 | pal->channel = 0; | ||
128 | } | ||
129 | |||
130 | mutex_unlock(&rc->uwb_dev.mutex); | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(uwb_radio_stop); | ||
133 | |||
134 | /* | ||
135 | * uwb_radio_force_channel - force a specific channel to be used | ||
136 | * @rc: the radio controller. | ||
137 | * @channel: the channel to use; -1 to force the radio to stop; 0 to | ||
138 | * use the default channel selection algorithm. | ||
139 | */ | ||
140 | int uwb_radio_force_channel(struct uwb_rc *rc, int channel) | ||
141 | { | ||
142 | int ret = 0; | ||
143 | |||
144 | mutex_lock(&rc->uwb_dev.mutex); | ||
145 | |||
146 | rc->beaconing_forced = channel; | ||
147 | ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); | ||
148 | |||
149 | mutex_unlock(&rc->uwb_dev.mutex); | ||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * uwb_radio_setup - setup the radio manager | ||
155 | * @rc: the radio controller. | ||
156 | * | ||
157 | * The radio controller is reset to ensure it's in a known state | ||
158 | * before it's used. | ||
159 | */ | ||
160 | int uwb_radio_setup(struct uwb_rc *rc) | ||
161 | { | ||
162 | return uwb_rc_reset(rc); | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * uwb_radio_reset_state - reset any radio manager state | ||
167 | * @rc: the radio controller. | ||
168 | * | ||
169 | * All internal radio manager state is reset to values corresponding | ||
170 | * to a reset radio controller. | ||
171 | */ | ||
172 | void uwb_radio_reset_state(struct uwb_rc *rc) | ||
173 | { | ||
174 | struct uwb_pal *pal; | ||
175 | |||
176 | mutex_lock(&rc->uwb_dev.mutex); | ||
177 | |||
178 | list_for_each_entry(pal, &rc->pals, node) { | ||
179 | if (pal->channel) { | ||
180 | pal->channel = -1; | ||
181 | if (pal->channel_changed) | ||
182 | pal->channel_changed(pal, -1); | ||
183 | } | ||
184 | } | ||
185 | |||
186 | rc->beaconing = -1; | ||
187 | rc->scanning = -1; | ||
188 | |||
189 | mutex_unlock(&rc->uwb_dev.mutex); | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * uwb_radio_shutdown - shutdown the radio manager | ||
194 | * @rc: the radio controller. | ||
195 | * | ||
196 | * The radio controller is reset. | ||
197 | */ | ||
198 | void uwb_radio_shutdown(struct uwb_rc *rc) | ||
199 | { | ||
200 | uwb_radio_reset_state(rc); | ||
201 | uwb_rc_reset(rc); | ||
202 | } | ||
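The new radio.c gives PALs a start/stop interface and a channel_changed hook instead of having each PAL drive beaconing directly. The sketch below shows how a PAL might plausibly use it; the my_pal_* functions and their bodies are illustrative assumptions, not part of the patch.

#include <linux/uwb.h>

static void my_pal_channel_changed(struct uwb_pal *pal, int channel)
{
	if (channel < 0) {
		/* radio stopped: tear down anything using the old channel */
	} else {
		/* (re)start traffic on the new channel */
	}
}

static int my_pal_start(struct uwb_pal *pal)
{
	pal->channel_changed = my_pal_channel_changed;

	/* Selects a channel and starts beaconing if the radio is idle;
	 * otherwise this PAL simply joins the current channel. */
	return uwb_radio_start(pal);
}

static void my_pal_stop(struct uwb_pal *pal)
{
	/* The radio is only shut down once no PAL still needs it. */
	uwb_radio_stop(pal);
}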
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c index 8de856fa7958..70f8050221ff 100644 --- a/drivers/uwb/reset.c +++ b/drivers/uwb/reset.c | |||
@@ -32,8 +32,6 @@ | |||
32 | #include <linux/err.h> | 32 | #include <linux/err.h> |
33 | 33 | ||
34 | #include "uwb-internal.h" | 34 | #include "uwb-internal.h" |
35 | #define D_LOCAL 0 | ||
36 | #include <linux/uwb/debug.h> | ||
37 | 35 | ||
38 | /** | 36 | /** |
39 | * Command result codes (WUSB1.0[T8-69]) | 37 | * Command result codes (WUSB1.0[T8-69]) |
@@ -323,17 +321,16 @@ int uwbd_msg_handle_reset(struct uwb_event *evt) | |||
323 | struct uwb_rc *rc = evt->rc; | 321 | struct uwb_rc *rc = evt->rc; |
324 | int ret; | 322 | int ret; |
325 | 323 | ||
326 | /* Need to prevent the RC hardware module going away while in | ||
327 | the rc->reset() call. */ | ||
328 | if (!try_module_get(rc->owner)) | ||
329 | return 0; | ||
330 | |||
331 | dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); | 324 | dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); |
332 | ret = rc->reset(rc); | 325 | ret = rc->reset(rc); |
333 | if (ret) | 326 | if (ret) { |
334 | dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); | 327 | dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); |
335 | 328 | goto error; | |
336 | module_put(rc->owner); | 329 | } |
330 | return 0; | ||
331 | error: | ||
332 | /* Nothing can be done except try the reset again. */ | ||
333 | uwb_rc_reset_all(rc); | ||
337 | return ret; | 334 | return ret; |
338 | } | 335 | } |
339 | 336 | ||
@@ -360,3 +357,33 @@ void uwb_rc_reset_all(struct uwb_rc *rc) | |||
360 | uwbd_event_queue(evt); | 357 | uwbd_event_queue(evt); |
361 | } | 358 | } |
362 | EXPORT_SYMBOL_GPL(uwb_rc_reset_all); | 359 | EXPORT_SYMBOL_GPL(uwb_rc_reset_all); |
360 | |||
361 | void uwb_rc_pre_reset(struct uwb_rc *rc) | ||
362 | { | ||
363 | rc->stop(rc); | ||
364 | uwbd_flush(rc); | ||
365 | |||
366 | uwb_radio_reset_state(rc); | ||
367 | uwb_rsv_remove_all(rc); | ||
368 | } | ||
369 | EXPORT_SYMBOL_GPL(uwb_rc_pre_reset); | ||
370 | |||
371 | void uwb_rc_post_reset(struct uwb_rc *rc) | ||
372 | { | ||
373 | int ret; | ||
374 | |||
375 | ret = rc->start(rc); | ||
376 | if (ret) | ||
377 | goto error; | ||
378 | ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr); | ||
379 | if (ret) | ||
380 | goto error; | ||
381 | ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr); | ||
382 | if (ret) | ||
383 | goto error; | ||
384 | return; | ||
385 | error: | ||
386 | /* Nothing can be done except try the reset again. */ | ||
387 | uwb_rc_reset_all(rc); | ||
388 | } | ||
389 | EXPORT_SYMBOL_GPL(uwb_rc_post_reset); | ||
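uwb_rc_pre_reset() and uwb_rc_post_reset() are exported so a radio controller driver can bracket its own hardware reset. A rough sketch of such a reset path, assuming a bus-specific my_rc_hw_reset() helper that is not part of this patch:

#include <linux/uwb.h>

static int my_rc_hw_reset(struct uwb_rc *rc)
{
	/* bus-specific hardware reset would go here */
	return 0;
}

static int my_rc_reset(struct uwb_rc *rc)
{
	int ret;

	/* Stop the RC, flush pending events, drop reservations and radio state. */
	uwb_rc_pre_reset(rc);

	ret = my_rc_hw_reset(rc);

	/* Restart the RC and restore the MAC and device addresses; on
	 * failure it schedules another full reset internally. */
	uwb_rc_post_reset(rc);

	return ret;
}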
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c index bae16204576d..ec6eecb32f30 100644 --- a/drivers/uwb/rsv.c +++ b/drivers/uwb/rsv.c | |||
@@ -15,23 +15,33 @@ | |||
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/uwb.h> | 19 | #include <linux/uwb.h> |
20 | #include <linux/random.h> | ||
21 | 21 | ||
22 | #include "uwb-internal.h" | 22 | #include "uwb-internal.h" |
23 | 23 | ||
24 | static void uwb_rsv_timer(unsigned long arg); | 24 | static void uwb_rsv_timer(unsigned long arg); |
25 | 25 | ||
26 | static const char *rsv_states[] = { | 26 | static const char *rsv_states[] = { |
27 | [UWB_RSV_STATE_NONE] = "none", | 27 | [UWB_RSV_STATE_NONE] = "none ", |
28 | [UWB_RSV_STATE_O_INITIATED] = "initiated", | 28 | [UWB_RSV_STATE_O_INITIATED] = "o initiated ", |
29 | [UWB_RSV_STATE_O_PENDING] = "pending", | 29 | [UWB_RSV_STATE_O_PENDING] = "o pending ", |
30 | [UWB_RSV_STATE_O_MODIFIED] = "modified", | 30 | [UWB_RSV_STATE_O_MODIFIED] = "o modified ", |
31 | [UWB_RSV_STATE_O_ESTABLISHED] = "established", | 31 | [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", |
32 | [UWB_RSV_STATE_T_ACCEPTED] = "accepted", | 32 | [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", |
33 | [UWB_RSV_STATE_T_DENIED] = "denied", | 33 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", |
34 | [UWB_RSV_STATE_T_PENDING] = "pending", | 34 | [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", |
35 | [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", | ||
36 | [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", | ||
37 | [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", | ||
38 | [UWB_RSV_STATE_T_PENDING] = "t pending ", | ||
39 | [UWB_RSV_STATE_T_DENIED] = "t denied ", | ||
40 | [UWB_RSV_STATE_T_RESIZED] = "t resized ", | ||
41 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", | ||
42 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", | ||
43 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", | ||
44 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", | ||
35 | }; | 45 | }; |
36 | 46 | ||
37 | static const char *rsv_types[] = { | 47 | static const char *rsv_types[] = { |
@@ -42,6 +52,31 @@ static const char *rsv_types[] = { | |||
42 | [UWB_DRP_TYPE_PCA] = "pca", | 52 | [UWB_DRP_TYPE_PCA] = "pca", |
43 | }; | 53 | }; |
44 | 54 | ||
55 | bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) | ||
56 | { | ||
57 | static const bool has_two_drp_ies[] = { | ||
58 | [UWB_RSV_STATE_O_INITIATED] = false, | ||
59 | [UWB_RSV_STATE_O_PENDING] = false, | ||
60 | [UWB_RSV_STATE_O_MODIFIED] = false, | ||
61 | [UWB_RSV_STATE_O_ESTABLISHED] = false, | ||
62 | [UWB_RSV_STATE_O_TO_BE_MOVED] = false, | ||
63 | [UWB_RSV_STATE_O_MOVE_COMBINING] = false, | ||
64 | [UWB_RSV_STATE_O_MOVE_REDUCING] = false, | ||
65 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, | ||
66 | [UWB_RSV_STATE_T_ACCEPTED] = false, | ||
67 | [UWB_RSV_STATE_T_CONFLICT] = false, | ||
68 | [UWB_RSV_STATE_T_PENDING] = false, | ||
69 | [UWB_RSV_STATE_T_DENIED] = false, | ||
70 | [UWB_RSV_STATE_T_RESIZED] = false, | ||
71 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, | ||
72 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, | ||
73 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, | ||
74 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, | ||
75 | }; | ||
76 | |||
77 | return has_two_drp_ies[rsv->state]; | ||
78 | } | ||
79 | |||
45 | /** | 80 | /** |
46 | * uwb_rsv_state_str - return a string for a reservation state | 81 | * uwb_rsv_state_str - return a string for a reservation state |
47 | * @state: the reservation state. | 82 | * @state: the reservation state. |
@@ -66,7 +101,7 @@ const char *uwb_rsv_type_str(enum uwb_drp_type type) | |||
66 | } | 101 | } |
67 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); | 102 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); |
68 | 103 | ||
69 | static void uwb_rsv_dump(struct uwb_rsv *rsv) | 104 | void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) |
70 | { | 105 | { |
71 | struct device *dev = &rsv->rc->uwb_dev.dev; | 106 | struct device *dev = &rsv->rc->uwb_dev.dev; |
72 | struct uwb_dev_addr devaddr; | 107 | struct uwb_dev_addr devaddr; |
@@ -82,6 +117,23 @@ static void uwb_rsv_dump(struct uwb_rsv *rsv) | |||
82 | dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); | 117 | dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); |
83 | } | 118 | } |
84 | 119 | ||
120 | static void uwb_rsv_release(struct kref *kref) | ||
121 | { | ||
122 | struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref); | ||
123 | |||
124 | kfree(rsv); | ||
125 | } | ||
126 | |||
127 | void uwb_rsv_get(struct uwb_rsv *rsv) | ||
128 | { | ||
129 | kref_get(&rsv->kref); | ||
130 | } | ||
131 | |||
132 | void uwb_rsv_put(struct uwb_rsv *rsv) | ||
133 | { | ||
134 | kref_put(&rsv->kref, uwb_rsv_release); | ||
135 | } | ||
136 | |||
85 | /* | 137 | /* |
86 | * Get a free stream index for a reservation. | 138 | * Get a free stream index for a reservation. |
87 | * | 139 | * |
@@ -92,6 +144,7 @@ static void uwb_rsv_dump(struct uwb_rsv *rsv) | |||
92 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | 144 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) |
93 | { | 145 | { |
94 | struct uwb_rc *rc = rsv->rc; | 146 | struct uwb_rc *rc = rsv->rc; |
147 | struct device *dev = &rc->uwb_dev.dev; | ||
95 | unsigned long *streams_bm; | 148 | unsigned long *streams_bm; |
96 | int stream; | 149 | int stream; |
97 | 150 | ||
@@ -113,12 +166,15 @@ static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | |||
113 | rsv->stream = stream; | 166 | rsv->stream = stream; |
114 | set_bit(stream, streams_bm); | 167 | set_bit(stream, streams_bm); |
115 | 168 | ||
169 | dev_dbg(dev, "get stream %d\n", rsv->stream); | ||
170 | |||
116 | return 0; | 171 | return 0; |
117 | } | 172 | } |
118 | 173 | ||
119 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | 174 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) |
120 | { | 175 | { |
121 | struct uwb_rc *rc = rsv->rc; | 176 | struct uwb_rc *rc = rsv->rc; |
177 | struct device *dev = &rc->uwb_dev.dev; | ||
122 | unsigned long *streams_bm; | 178 | unsigned long *streams_bm; |
123 | 179 | ||
124 | switch (rsv->target.type) { | 180 | switch (rsv->target.type) { |
@@ -133,86 +189,52 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | |||
133 | } | 189 | } |
134 | 190 | ||
135 | clear_bit(rsv->stream, streams_bm); | 191 | clear_bit(rsv->stream, streams_bm); |
192 | |||
193 | dev_dbg(dev, "put stream %d\n", rsv->stream); | ||
136 | } | 194 | } |
137 | 195 | ||
138 | /* | 196 | void uwb_rsv_backoff_win_timer(unsigned long arg) |
139 | * Generate a MAS allocation with a single row component. | ||
140 | */ | ||
141 | static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, | ||
142 | int first_mas, int mas_per_zone, | ||
143 | int zs, int ze) | ||
144 | { | 197 | { |
145 | struct uwb_mas_bm col; | 198 | struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; |
146 | int z; | 199 | struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); |
147 | 200 | struct device *dev = &rc->uwb_dev.dev; | |
148 | bitmap_zero(mas->bm, UWB_NUM_MAS); | 201 | |
149 | bitmap_zero(col.bm, UWB_NUM_MAS); | 202 | bow->can_reserve_extra_mases = true; |
150 | bitmap_fill(col.bm, mas_per_zone); | 203 | if (bow->total_expired <= 4) { |
151 | bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 204 | bow->total_expired++; |
152 | 205 | } else { | |
153 | for (z = zs; z <= ze; z++) { | 206 | /* after 4 backoff window has expired we can exit from |
154 | bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); | 207 | * the backoff procedure */ |
155 | bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 208 | bow->total_expired = 0; |
209 | bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; | ||
156 | } | 210 | } |
211 | dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); | ||
212 | |||
213 | /* try to relocate all the "to be moved" relocations */ | ||
214 | uwb_rsv_handle_drp_avail_change(rc); | ||
157 | } | 215 | } |
158 | 216 | ||
159 | /* | 217 | void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) |
160 | * Allocate some MAS for this reservation based on current local | ||
161 | * availability, the reservation parameters (max_mas, min_mas, | ||
162 | * sparsity), and the WiMedia rules for MAS allocations. | ||
163 | * | ||
164 | * Returns -EBUSY is insufficient free MAS are available. | ||
165 | * | ||
166 | * FIXME: to simplify this, only safe reservations with a single row | ||
167 | * component in zones 1 to 15 are tried (zone 0 is skipped to avoid | ||
168 | * problems with the MAS reserved for the BP). | ||
169 | * | ||
170 | * [ECMA-368] section B.2. | ||
171 | */ | ||
172 | static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) | ||
173 | { | 218 | { |
174 | static const int safe_mas_in_row[UWB_NUM_ZONES] = { | 219 | struct uwb_drp_backoff_win *bow = &rc->bow; |
175 | 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, | 220 | struct device *dev = &rc->uwb_dev.dev; |
176 | }; | 221 | unsigned timeout_us; |
177 | int n, r; | ||
178 | struct uwb_mas_bm mas; | ||
179 | bool found = false; | ||
180 | 222 | ||
181 | /* | 223 | dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); |
182 | * Search all valid safe allocations until either: too few MAS | ||
183 | * are available; or the smallest allocation with sufficient | ||
184 | * MAS is found. | ||
185 | * | ||
186 | * The top of the zones are preferred, so space for larger | ||
187 | * allocations is available in the bottom of the zone (e.g., a | ||
188 | * 15 MAS allocation should start in row 14 leaving space for | ||
189 | * a 120 MAS allocation at row 0). | ||
190 | */ | ||
191 | for (n = safe_mas_in_row[0]; n >= 1; n--) { | ||
192 | int num_mas; | ||
193 | 224 | ||
194 | num_mas = n * (UWB_NUM_ZONES - 1); | 225 | bow->can_reserve_extra_mases = false; |
195 | if (num_mas < rsv->min_mas) | ||
196 | break; | ||
197 | if (found && num_mas < rsv->max_mas) | ||
198 | break; | ||
199 | 226 | ||
200 | for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { | 227 | if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) |
201 | if (safe_mas_in_row[r] < n) | 228 | return; |
202 | continue; | ||
203 | uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); | ||
204 | if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { | ||
205 | found = true; | ||
206 | break; | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | 229 | ||
211 | if (!found) | 230 | bow->window <<= 1; |
212 | return -EBUSY; | 231 | bow->n = random32() & (bow->window - 1); |
232 | dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); | ||
213 | 233 | ||
214 | bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); | 234 | /* reset the timer associated variables */ |
215 | return 0; | 235 | timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; |
236 | bow->total_expired = 0; | ||
237 | mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); | ||
216 | } | 238 | } |
217 | 239 | ||
218 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | 240 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) |
@@ -225,13 +247,16 @@ static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | |||
225 | * received. | 247 | * received. |
226 | */ | 248 | */ |
227 | if (rsv->is_multicast) { | 249 | if (rsv->is_multicast) { |
228 | if (rsv->state == UWB_RSV_STATE_O_INITIATED) | 250 | if (rsv->state == UWB_RSV_STATE_O_INITIATED |
251 | || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING | ||
252 | || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING | ||
253 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) | ||
229 | sframes = 1; | 254 | sframes = 1; |
230 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) | 255 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) |
231 | sframes = 0; | 256 | sframes = 0; |
257 | |||
232 | } | 258 | } |
233 | 259 | ||
234 | rsv->expired = false; | ||
235 | if (sframes > 0) { | 260 | if (sframes > 0) { |
236 | /* | 261 | /* |
237 | * Add an additional 2 superframes to account for the | 262 | * Add an additional 2 superframes to account for the |
@@ -253,7 +278,7 @@ static void uwb_rsv_state_update(struct uwb_rsv *rsv, | |||
253 | rsv->state = new_state; | 278 | rsv->state = new_state; |
254 | rsv->ie_valid = false; | 279 | rsv->ie_valid = false; |
255 | 280 | ||
256 | uwb_rsv_dump(rsv); | 281 | uwb_rsv_dump("SU", rsv); |
257 | 282 | ||
258 | uwb_rsv_stroke_timer(rsv); | 283 | uwb_rsv_stroke_timer(rsv); |
259 | uwb_rsv_sched_update(rsv->rc); | 284 | uwb_rsv_sched_update(rsv->rc); |
@@ -267,10 +292,17 @@ static void uwb_rsv_callback(struct uwb_rsv *rsv) | |||
267 | 292 | ||
268 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | 293 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) |
269 | { | 294 | { |
295 | struct uwb_rsv_move *mv = &rsv->mv; | ||
296 | |||
270 | if (rsv->state == new_state) { | 297 | if (rsv->state == new_state) { |
271 | switch (rsv->state) { | 298 | switch (rsv->state) { |
272 | case UWB_RSV_STATE_O_ESTABLISHED: | 299 | case UWB_RSV_STATE_O_ESTABLISHED: |
300 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
301 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
302 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
273 | case UWB_RSV_STATE_T_ACCEPTED: | 303 | case UWB_RSV_STATE_T_ACCEPTED: |
304 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
305 | case UWB_RSV_STATE_T_RESIZED: | ||
274 | case UWB_RSV_STATE_NONE: | 306 | case UWB_RSV_STATE_NONE: |
275 | uwb_rsv_stroke_timer(rsv); | 307 | uwb_rsv_stroke_timer(rsv); |
276 | break; | 308 | break; |
@@ -282,10 +314,10 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
282 | return; | 314 | return; |
283 | } | 315 | } |
284 | 316 | ||
317 | uwb_rsv_dump("SC", rsv); | ||
318 | |||
285 | switch (new_state) { | 319 | switch (new_state) { |
286 | case UWB_RSV_STATE_NONE: | 320 | case UWB_RSV_STATE_NONE: |
287 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
288 | uwb_rsv_put_stream(rsv); | ||
289 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); | 321 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); |
290 | uwb_rsv_callback(rsv); | 322 | uwb_rsv_callback(rsv); |
291 | break; | 323 | break; |
@@ -295,12 +327,45 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
295 | case UWB_RSV_STATE_O_PENDING: | 327 | case UWB_RSV_STATE_O_PENDING: |
296 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); | 328 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); |
297 | break; | 329 | break; |
330 | case UWB_RSV_STATE_O_MODIFIED: | ||
331 | /* the companion bitmap holds the MASes to drop */ | ||
332 | bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
333 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
334 | break; | ||
298 | case UWB_RSV_STATE_O_ESTABLISHED: | 335 | case UWB_RSV_STATE_O_ESTABLISHED: |
336 | if (rsv->state == UWB_RSV_STATE_O_MODIFIED | ||
337 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { | ||
338 | uwb_drp_avail_release(rsv->rc, &mv->companion_mas); | ||
339 | rsv->needs_release_companion_mas = false; | ||
340 | } | ||
299 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 341 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
300 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 342 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); |
301 | uwb_rsv_callback(rsv); | 343 | uwb_rsv_callback(rsv); |
302 | break; | 344 | break; |
345 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
346 | rsv->needs_release_companion_mas = true; | ||
347 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
348 | break; | ||
349 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
350 | rsv->needs_release_companion_mas = false; | ||
351 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
352 | bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
353 | rsv->mas.safe += mv->companion_mas.safe; | ||
354 | rsv->mas.unsafe += mv->companion_mas.unsafe; | ||
355 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
356 | break; | ||
357 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
358 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
359 | rsv->needs_release_companion_mas = true; | ||
360 | rsv->mas.safe = mv->final_mas.safe; | ||
361 | rsv->mas.unsafe = mv->final_mas.unsafe; | ||
362 | bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
363 | bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); | ||
364 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
365 | break; | ||
303 | case UWB_RSV_STATE_T_ACCEPTED: | 366 | case UWB_RSV_STATE_T_ACCEPTED: |
367 | case UWB_RSV_STATE_T_RESIZED: | ||
368 | rsv->needs_release_companion_mas = false; | ||
304 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 369 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
305 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); | 370 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); |
306 | uwb_rsv_callback(rsv); | 371 | uwb_rsv_callback(rsv); |
@@ -308,12 +373,82 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
308 | case UWB_RSV_STATE_T_DENIED: | 373 | case UWB_RSV_STATE_T_DENIED: |
309 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); | 374 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); |
310 | break; | 375 | break; |
376 | case UWB_RSV_STATE_T_CONFLICT: | ||
377 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
378 | break; | ||
379 | case UWB_RSV_STATE_T_PENDING: | ||
380 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); | ||
381 | break; | ||
382 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
383 | rsv->needs_release_companion_mas = true; | ||
384 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
385 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); | ||
386 | break; | ||
311 | default: | 387 | default: |
312 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", | 388 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", |
313 | uwb_rsv_state_str(new_state), new_state); | 389 | uwb_rsv_state_str(new_state), new_state); |
314 | } | 390 | } |
315 | } | 391 | } |
316 | 392 | ||
393 | static void uwb_rsv_handle_timeout_work(struct work_struct *work) | ||
394 | { | ||
395 | struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, | ||
396 | handle_timeout_work); | ||
397 | struct uwb_rc *rc = rsv->rc; | ||
398 | |||
399 | mutex_lock(&rc->rsvs_mutex); | ||
400 | |||
401 | uwb_rsv_dump("TO", rsv); | ||
402 | |||
403 | switch (rsv->state) { | ||
404 | case UWB_RSV_STATE_O_INITIATED: | ||
405 | if (rsv->is_multicast) { | ||
406 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
407 | goto unlock; | ||
408 | } | ||
409 | break; | ||
410 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
411 | if (rsv->is_multicast) { | ||
412 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
413 | goto unlock; | ||
414 | } | ||
415 | break; | ||
416 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
417 | if (rsv->is_multicast) { | ||
418 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
419 | goto unlock; | ||
420 | } | ||
421 | break; | ||
422 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
423 | if (rsv->is_multicast) { | ||
424 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
425 | goto unlock; | ||
426 | } | ||
427 | break; | ||
428 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
429 | if (rsv->is_multicast) | ||
430 | goto unlock; | ||
431 | break; | ||
432 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
433 | /* | ||
434 | * The time out could be for the main or the | ||
435 | * companion DRP, assume it's for the companion and | ||
436 | * drop that first. A further time out is required to | ||
437 | * drop the main. | ||
438 | */ | ||
439 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
440 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
441 | goto unlock; | ||
442 | default: | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | uwb_rsv_remove(rsv); | ||
447 | |||
448 | unlock: | ||
449 | mutex_unlock(&rc->rsvs_mutex); | ||
450 | } | ||
451 | |||
317 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | 452 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) |
318 | { | 453 | { |
319 | struct uwb_rsv *rsv; | 454 | struct uwb_rsv *rsv; |
@@ -324,23 +459,17 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | |||
324 | 459 | ||
325 | INIT_LIST_HEAD(&rsv->rc_node); | 460 | INIT_LIST_HEAD(&rsv->rc_node); |
326 | INIT_LIST_HEAD(&rsv->pal_node); | 461 | INIT_LIST_HEAD(&rsv->pal_node); |
462 | kref_init(&rsv->kref); | ||
327 | init_timer(&rsv->timer); | 463 | init_timer(&rsv->timer); |
328 | rsv->timer.function = uwb_rsv_timer; | 464 | rsv->timer.function = uwb_rsv_timer; |
329 | rsv->timer.data = (unsigned long)rsv; | 465 | rsv->timer.data = (unsigned long)rsv; |
330 | 466 | ||
331 | rsv->rc = rc; | 467 | rsv->rc = rc; |
468 | INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); | ||
332 | 469 | ||
333 | return rsv; | 470 | return rsv; |
334 | } | 471 | } |
335 | 472 | ||
336 | static void uwb_rsv_free(struct uwb_rsv *rsv) | ||
337 | { | ||
338 | uwb_dev_put(rsv->owner); | ||
339 | if (rsv->target.type == UWB_RSV_TARGET_DEV) | ||
340 | uwb_dev_put(rsv->target.dev); | ||
341 | kfree(rsv); | ||
342 | } | ||
343 | |||
344 | /** | 473 | /** |
345 | * uwb_rsv_create - allocate and initialize a UWB reservation structure | 474 | * uwb_rsv_create - allocate and initialize a UWB reservation structure |
346 | * @rc: the radio controller | 475 | * @rc: the radio controller |
@@ -371,26 +500,36 @@ EXPORT_SYMBOL_GPL(uwb_rsv_create); | |||
371 | 500 | ||
372 | void uwb_rsv_remove(struct uwb_rsv *rsv) | 501 | void uwb_rsv_remove(struct uwb_rsv *rsv) |
373 | { | 502 | { |
503 | uwb_rsv_dump("RM", rsv); | ||
504 | |||
374 | if (rsv->state != UWB_RSV_STATE_NONE) | 505 | if (rsv->state != UWB_RSV_STATE_NONE) |
375 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 506 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
507 | |||
508 | if (rsv->needs_release_companion_mas) | ||
509 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
510 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
511 | |||
512 | if (uwb_rsv_is_owner(rsv)) | ||
513 | uwb_rsv_put_stream(rsv); | ||
514 | |||
376 | del_timer_sync(&rsv->timer); | 515 | del_timer_sync(&rsv->timer); |
377 | list_del(&rsv->rc_node); | 516 | uwb_dev_put(rsv->owner); |
378 | uwb_rsv_free(rsv); | 517 | if (rsv->target.type == UWB_RSV_TARGET_DEV) |
518 | uwb_dev_put(rsv->target.dev); | ||
519 | |||
520 | list_del_init(&rsv->rc_node); | ||
521 | uwb_rsv_put(rsv); | ||
379 | } | 522 | } |
380 | 523 | ||
381 | /** | 524 | /** |
382 | * uwb_rsv_destroy - free a UWB reservation structure | 525 | * uwb_rsv_destroy - free a UWB reservation structure |
383 | * @rsv: the reservation to free | 526 | * @rsv: the reservation to free |
384 | * | 527 | * |
385 | * The reservation will be terminated if it is pending or established. | 528 | * The reservation must already be terminated. |
386 | */ | 529 | */ |
387 | void uwb_rsv_destroy(struct uwb_rsv *rsv) | 530 | void uwb_rsv_destroy(struct uwb_rsv *rsv) |
388 | { | 531 | { |
389 | struct uwb_rc *rc = rsv->rc; | 532 | uwb_rsv_put(rsv); |
390 | |||
391 | mutex_lock(&rc->rsvs_mutex); | ||
392 | uwb_rsv_remove(rsv); | ||
393 | mutex_unlock(&rc->rsvs_mutex); | ||
394 | } | 533 | } |
395 | EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | 534 | EXPORT_SYMBOL_GPL(uwb_rsv_destroy); |
396 | 535 | ||
@@ -399,7 +538,7 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
399 | * @rsv: the reservation | 538 | * @rsv: the reservation |
400 | * | 539 | * |
401 | * The PAL should fill in @rsv's owner, target, type, max_mas, | 540 | * The PAL should fill in @rsv's owner, target, type, max_mas, |
402 | * min_mas, sparsity and is_multicast fields. If the target is a | 541 | * min_mas, max_interval and is_multicast fields. If the target is a |
403 | * uwb_dev it must be referenced. | 542 | * uwb_dev it must be referenced. |
404 | * | 543 | * |
405 | * The reservation's callback will be called when the reservation is | 544 | * The reservation's callback will be called when the reservation is |
@@ -408,20 +547,32 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
408 | int uwb_rsv_establish(struct uwb_rsv *rsv) | 547 | int uwb_rsv_establish(struct uwb_rsv *rsv) |
409 | { | 548 | { |
410 | struct uwb_rc *rc = rsv->rc; | 549 | struct uwb_rc *rc = rsv->rc; |
550 | struct uwb_mas_bm available; | ||
411 | int ret; | 551 | int ret; |
412 | 552 | ||
413 | mutex_lock(&rc->rsvs_mutex); | 553 | mutex_lock(&rc->rsvs_mutex); |
414 | |||
415 | ret = uwb_rsv_get_stream(rsv); | 554 | ret = uwb_rsv_get_stream(rsv); |
416 | if (ret) | 555 | if (ret) |
417 | goto out; | 556 | goto out; |
418 | 557 | ||
419 | ret = uwb_rsv_alloc_mas(rsv); | 558 | rsv->tiebreaker = random32() & 1; |
420 | if (ret) { | 559 | /* get available mas bitmap */ |
560 | uwb_drp_available(rc, &available); | ||
561 | |||
562 | ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); | ||
563 | if (ret == UWB_RSV_ALLOC_NOT_FOUND) { | ||
564 | ret = -EBUSY; | ||
565 | uwb_rsv_put_stream(rsv); | ||
566 | goto out; | ||
567 | } | ||
568 | |||
569 | ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); | ||
570 | if (ret != 0) { | ||
421 | uwb_rsv_put_stream(rsv); | 571 | uwb_rsv_put_stream(rsv); |
422 | goto out; | 572 | goto out; |
423 | } | 573 | } |
424 | 574 | ||
575 | uwb_rsv_get(rsv); | ||
425 | list_add_tail(&rsv->rc_node, &rc->reservations); | 576 | list_add_tail(&rsv->rc_node, &rc->reservations); |
426 | rsv->owner = &rc->uwb_dev; | 577 | rsv->owner = &rc->uwb_dev; |
427 | uwb_dev_get(rsv->owner); | 578 | uwb_dev_get(rsv->owner); |
@@ -437,16 +588,71 @@ EXPORT_SYMBOL_GPL(uwb_rsv_establish); | |||
437 | * @rsv: the reservation to modify | 588 | * @rsv: the reservation to modify |
438 | * @max_mas: new maximum MAS to reserve | 589 | * @max_mas: new maximum MAS to reserve |
439 | * @min_mas: new minimum MAS to reserve | 590 | * @min_mas: new minimum MAS to reserve |
440 | * @sparsity: new sparsity to use | 591 | * @max_interval: new max_interval to use |
441 | * | 592 | * |
442 | * FIXME: implement this once there are PALs that use it. | 593 | * FIXME: implement this once there are PALs that use it. |
443 | */ | 594 | */ |
444 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) | 595 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) |
445 | { | 596 | { |
446 | return -ENOSYS; | 597 | return -ENOSYS; |
447 | } | 598 | } |
448 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); | 599 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); |
449 | 600 | ||
601 | /* | ||
602 | * move an already established reservation (rc->rsvs_mutex must be | ||
603 | * held when this function is called) | ||
604 | */ | ||
605 | int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) | ||
606 | { | ||
607 | struct uwb_rc *rc = rsv->rc; | ||
608 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
609 | struct device *dev = &rc->uwb_dev.dev; | ||
610 | struct uwb_rsv_move *mv; | ||
611 | int ret = 0; | ||
612 | |||
613 | if (bow->can_reserve_extra_mases == false) | ||
614 | return -EBUSY; | ||
615 | |||
616 | mv = &rsv->mv; | ||
617 | |||
618 | if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { | ||
619 | |||
620 | if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { | ||
621 | /* We want to move the reservation */ | ||
622 | bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
623 | uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); | ||
624 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
625 | } | ||
626 | } else { | ||
627 | dev_dbg(dev, "new allocation not found\n"); | ||
628 | } | ||
629 | |||
630 | return ret; | ||
631 | } | ||
632 | |||
633 | /* Try to move every reservation in state O_ESTABLISHED, giving the | ||
634 | * MAS allocator an availability that is the real availability plus | ||
635 | * the MAS already allocated to the reservation. */ | ||
636 | void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) | ||
637 | { | ||
638 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
639 | struct uwb_rsv *rsv; | ||
640 | struct uwb_mas_bm mas; | ||
641 | |||
642 | if (bow->can_reserve_extra_mases == false) | ||
643 | return; | ||
644 | |||
645 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
646 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || | ||
647 | rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { | ||
648 | uwb_drp_available(rc, &mas); | ||
649 | bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
650 | uwb_rsv_try_move(rsv, &mas); | ||
651 | } | ||
652 | } | ||
653 | |||
654 | } | ||
655 | |||
450 | /** | 656 | /** |
451 | * uwb_rsv_terminate - terminate an established reservation | 657 | * uwb_rsv_terminate - terminate an established reservation |
452 | * @rsv: the reservation to terminate | 658 | * @rsv: the reservation to terminate |
@@ -463,7 +669,8 @@ void uwb_rsv_terminate(struct uwb_rsv *rsv) | |||
463 | 669 | ||
464 | mutex_lock(&rc->rsvs_mutex); | 670 | mutex_lock(&rc->rsvs_mutex); |
465 | 671 | ||
466 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 672 | if (rsv->state != UWB_RSV_STATE_NONE) |
673 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | ||
467 | 674 | ||
468 | mutex_unlock(&rc->rsvs_mutex); | 675 | mutex_unlock(&rc->rsvs_mutex); |
469 | } | 676 | } |
@@ -477,9 +684,14 @@ EXPORT_SYMBOL_GPL(uwb_rsv_terminate); | |||
477 | * | 684 | * |
478 | * Reservation requests from peers are denied unless a PAL accepts it | 685 | * Reservation requests from peers are denied unless a PAL accepts it |
479 | * by calling this function. | 686 | * by calling this function. |
687 | * | ||
688 | * The PAL must call uwb_rsv_destroy() for all accepted reservations before | ||
689 | * calling uwb_pal_unregister(). | ||
480 | */ | 690 | */ |
481 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) | 691 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) |
482 | { | 692 | { |
693 | uwb_rsv_get(rsv); | ||
694 | |||
483 | rsv->callback = cb; | 695 | rsv->callback = cb; |
484 | rsv->pal_priv = pal_priv; | 696 | rsv->pal_priv = pal_priv; |
485 | rsv->state = UWB_RSV_STATE_T_ACCEPTED; | 697 | rsv->state = UWB_RSV_STATE_T_ACCEPTED; |
@@ -530,9 +742,9 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
530 | uwb_dev_get(rsv->owner); | 742 | uwb_dev_get(rsv->owner); |
531 | rsv->target.type = UWB_RSV_TARGET_DEV; | 743 | rsv->target.type = UWB_RSV_TARGET_DEV; |
532 | rsv->target.dev = &rc->uwb_dev; | 744 | rsv->target.dev = &rc->uwb_dev; |
745 | uwb_dev_get(&rc->uwb_dev); | ||
533 | rsv->type = uwb_ie_drp_type(drp_ie); | 746 | rsv->type = uwb_ie_drp_type(drp_ie); |
534 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); | 747 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); |
535 | set_bit(rsv->stream, rsv->owner->streams); | ||
536 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); | 748 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); |
537 | 749 | ||
538 | /* | 750 | /* |
@@ -540,24 +752,46 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
540 | * deny the request. | 752 | * deny the request. |
541 | */ | 753 | */ |
542 | rsv->state = UWB_RSV_STATE_T_DENIED; | 754 | rsv->state = UWB_RSV_STATE_T_DENIED; |
543 | spin_lock(&rc->pal_lock); | 755 | mutex_lock(&rc->uwb_dev.mutex); |
544 | list_for_each_entry(pal, &rc->pals, node) { | 756 | list_for_each_entry(pal, &rc->pals, node) { |
545 | if (pal->new_rsv) | 757 | if (pal->new_rsv) |
546 | pal->new_rsv(rsv); | 758 | pal->new_rsv(pal, rsv); |
547 | if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) | 759 | if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) |
548 | break; | 760 | break; |
549 | } | 761 | } |
550 | spin_unlock(&rc->pal_lock); | 762 | mutex_unlock(&rc->uwb_dev.mutex); |
551 | 763 | ||
552 | list_add_tail(&rsv->rc_node, &rc->reservations); | 764 | list_add_tail(&rsv->rc_node, &rc->reservations); |
553 | state = rsv->state; | 765 | state = rsv->state; |
554 | rsv->state = UWB_RSV_STATE_NONE; | 766 | rsv->state = UWB_RSV_STATE_NONE; |
555 | uwb_rsv_set_state(rsv, state); | 767 | |
768 | /* FIXME: do something sensible here */ | ||
769 | if (state == UWB_RSV_STATE_T_ACCEPTED | ||
770 | && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { | ||
771 | /* FIXME: do something sensible here */ | ||
772 | } else { | ||
773 | uwb_rsv_set_state(rsv, state); | ||
774 | } | ||
556 | 775 | ||
557 | return rsv; | 776 | return rsv; |
558 | } | 777 | } |
559 | 778 | ||
560 | /** | 779 | /** |
780 | * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation | ||
781 | * @rsv: the reservation. | ||
782 | * @mas: returns the available MAS. | ||
783 | * | ||
784 | * The usable MAS of a reservation may be less than the negotiated MAS | ||
785 | * if alien BPs are present. | ||
786 | */ | ||
787 | void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) | ||
788 | { | ||
789 | bitmap_zero(mas->bm, UWB_NUM_MAS); | ||
790 | bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); | ||
791 | } | ||
792 | EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); | ||
793 | |||
794 | /** | ||
561 | * uwb_rsv_find - find a reservation for a received DRP IE. | 795 | * uwb_rsv_find - find a reservation for a received DRP IE. |
562 | * @rc: the radio controller | 796 | * @rc: the radio controller |
563 | * @src: source of the DRP IE | 797 | * @src: source of the DRP IE |
@@ -596,8 +830,6 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
596 | bool ie_updated = false; | 830 | bool ie_updated = false; |
597 | 831 | ||
598 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | 832 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { |
599 | if (rsv->expired) | ||
600 | uwb_drp_handle_timeout(rsv); | ||
601 | if (!rsv->ie_valid) { | 833 | if (!rsv->ie_valid) { |
602 | uwb_drp_ie_update(rsv); | 834 | uwb_drp_ie_update(rsv); |
603 | ie_updated = true; | 835 | ie_updated = true; |
@@ -607,9 +839,47 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
607 | return ie_updated; | 839 | return ie_updated; |
608 | } | 840 | } |
609 | 841 | ||
842 | void uwb_rsv_queue_update(struct uwb_rc *rc) | ||
843 | { | ||
844 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
845 | |||
846 | queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); | ||
847 | } | ||
848 | |||
849 | /** | ||
850 | * uwb_rsv_sched_update - schedule an update of the DRP IEs | ||
851 | * @rc: the radio controller. | ||
852 | * | ||
853 | * To improve performance and ensure correctness with [ECMA-368], the | ||
854 | * number of SET-DRP-IE commands that are done is limited. | ||
855 | * | ||
856 | * DRP IE updates come from two sources: DRP events from the hardware, | ||
857 | * which all occur at the beginning of the superframe ('synchronous' | ||
858 | * events) and reservation establishment/termination requests from | ||
859 | * PALs or timers ('asynchronous' events). | ||
860 | * | ||
861 | * A delayed work ensures that all the synchronous events result in | ||
862 | * one SET-DRP-IE command. | ||
863 | * | ||
864 | * Additional logic (the set_drp_ie_pending and rsv_updated_postponed | ||
865 | * flags) will prevent an asynchronous event starting a SET-DRP-IE | ||
866 | * command if one is currently awaiting a response. | ||
867 | * | ||
868 | * FIXME: this does leave a window where an asynchronous event can delay | ||
869 | * the SET-DRP-IE for a synchronous event by one superframe. | ||
870 | */ | ||
610 | void uwb_rsv_sched_update(struct uwb_rc *rc) | 871 | void uwb_rsv_sched_update(struct uwb_rc *rc) |
611 | { | 872 | { |
612 | queue_work(rc->rsv_workq, &rc->rsv_update_work); | 873 | spin_lock(&rc->rsvs_lock); |
874 | if (!delayed_work_pending(&rc->rsv_update_work)) { | ||
875 | if (rc->set_drp_ie_pending > 0) { | ||
876 | rc->set_drp_ie_pending++; | ||
877 | goto unlock; | ||
878 | } | ||
879 | uwb_rsv_queue_update(rc); | ||
880 | } | ||
881 | unlock: | ||
882 | spin_unlock(&rc->rsvs_lock); | ||
613 | } | 883 | } |
614 | 884 | ||
615 | /* | 885 | /* |
@@ -618,7 +888,8 @@ void uwb_rsv_sched_update(struct uwb_rc *rc) | |||
618 | */ | 888 | */ |
619 | static void uwb_rsv_update_work(struct work_struct *work) | 889 | static void uwb_rsv_update_work(struct work_struct *work) |
620 | { | 890 | { |
621 | struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); | 891 | struct uwb_rc *rc = container_of(work, struct uwb_rc, |
892 | rsv_update_work.work); | ||
622 | bool ie_updated; | 893 | bool ie_updated; |
623 | 894 | ||
624 | mutex_lock(&rc->rsvs_mutex); | 895 | mutex_lock(&rc->rsvs_mutex); |
@@ -630,25 +901,71 @@ static void uwb_rsv_update_work(struct work_struct *work) | |||
630 | ie_updated = true; | 901 | ie_updated = true; |
631 | } | 902 | } |
632 | 903 | ||
633 | if (ie_updated) | 904 | if (ie_updated && (rc->set_drp_ie_pending == 0)) |
634 | uwb_rc_send_all_drp_ie(rc); | 905 | uwb_rc_send_all_drp_ie(rc); |
635 | 906 | ||
636 | mutex_unlock(&rc->rsvs_mutex); | 907 | mutex_unlock(&rc->rsvs_mutex); |
637 | } | 908 | } |
638 | 909 | ||
910 | static void uwb_rsv_alien_bp_work(struct work_struct *work) | ||
911 | { | ||
912 | struct uwb_rc *rc = container_of(work, struct uwb_rc, | ||
913 | rsv_alien_bp_work.work); | ||
914 | struct uwb_rsv *rsv; | ||
915 | |||
916 | mutex_lock(&rc->rsvs_mutex); | ||
917 | |||
918 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
919 | if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { | ||
920 | rsv->callback(rsv); | ||
921 | } | ||
922 | } | ||
923 | |||
924 | mutex_unlock(&rc->rsvs_mutex); | ||
925 | } | ||
926 | |||
639 | static void uwb_rsv_timer(unsigned long arg) | 927 | static void uwb_rsv_timer(unsigned long arg) |
640 | { | 928 | { |
641 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; | 929 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; |
642 | 930 | ||
643 | rsv->expired = true; | 931 | queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); |
644 | uwb_rsv_sched_update(rsv->rc); | 932 | } |
933 | |||
934 | /** | ||
935 | * uwb_rsv_remove_all - remove all reservations | ||
936 | * @rc: the radio controller | ||
937 | * | ||
938 | * A DRP IE update is not done. | ||
939 | */ | ||
940 | void uwb_rsv_remove_all(struct uwb_rc *rc) | ||
941 | { | ||
942 | struct uwb_rsv *rsv, *t; | ||
943 | |||
944 | mutex_lock(&rc->rsvs_mutex); | ||
945 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
946 | uwb_rsv_remove(rsv); | ||
947 | } | ||
948 | /* Cancel any postponed update. */ | ||
949 | rc->set_drp_ie_pending = 0; | ||
950 | mutex_unlock(&rc->rsvs_mutex); | ||
951 | |||
952 | cancel_delayed_work_sync(&rc->rsv_update_work); | ||
645 | } | 953 | } |
646 | 954 | ||
647 | void uwb_rsv_init(struct uwb_rc *rc) | 955 | void uwb_rsv_init(struct uwb_rc *rc) |
648 | { | 956 | { |
649 | INIT_LIST_HEAD(&rc->reservations); | 957 | INIT_LIST_HEAD(&rc->reservations); |
958 | INIT_LIST_HEAD(&rc->cnflt_alien_list); | ||
650 | mutex_init(&rc->rsvs_mutex); | 959 | mutex_init(&rc->rsvs_mutex); |
651 | INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | 960 | spin_lock_init(&rc->rsvs_lock); |
961 | INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | ||
962 | INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); | ||
963 | rc->bow.can_reserve_extra_mases = true; | ||
964 | rc->bow.total_expired = 0; | ||
965 | rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; | ||
966 | init_timer(&rc->bow.timer); | ||
967 | rc->bow.timer.function = uwb_rsv_backoff_win_timer; | ||
968 | rc->bow.timer.data = (unsigned long)&rc->bow; | ||
652 | 969 | ||
653 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); | 970 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); |
654 | } | 971 | } |
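
uwb_rsv_init() above programs the backoff-window timer field by field (init_timer(), then .function and .data). setup_timer() expresses the same initialisation in one call; a sketch with hypothetical my_* names:

#include <linux/timer.h>

struct my_backoff {
        struct timer_list timer;
        unsigned window;                /* illustrative only */
};

static void my_backoff_timer_fn(unsigned long arg)
{
        struct my_backoff *bow = (struct my_backoff *)arg;

        (void)bow;      /* widen the window, reschedule, ... */
}

static void my_backoff_init(struct my_backoff *bow)
{
        /* equivalent to: init_timer(&bow->timer);
         *                bow->timer.function = my_backoff_timer_fn;
         *                bow->timer.data = (unsigned long)bow; */
        setup_timer(&bow->timer, my_backoff_timer_fn, (unsigned long)bow);
}
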
@@ -667,14 +984,6 @@ int uwb_rsv_setup(struct uwb_rc *rc) | |||
667 | 984 | ||
668 | void uwb_rsv_cleanup(struct uwb_rc *rc) | 985 | void uwb_rsv_cleanup(struct uwb_rc *rc) |
669 | { | 986 | { |
670 | struct uwb_rsv *rsv, *t; | 987 | uwb_rsv_remove_all(rc); |
671 | |||
672 | mutex_lock(&rc->rsvs_mutex); | ||
673 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
674 | uwb_rsv_remove(rsv); | ||
675 | } | ||
676 | mutex_unlock(&rc->rsvs_mutex); | ||
677 | |||
678 | cancel_work_sync(&rc->rsv_update_work); | ||
679 | destroy_workqueue(rc->rsv_workq); | 988 | destroy_workqueue(rc->rsv_workq); |
680 | } | 989 | } |
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c index 2d8d62d9f53e..5ad36164c13b 100644 --- a/drivers/uwb/umc-bus.c +++ b/drivers/uwb/umc-bus.c | |||
@@ -11,23 +11,48 @@ | |||
11 | #include <linux/uwb/umc.h> | 11 | #include <linux/uwb/umc.h> |
12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
13 | 13 | ||
14 | static int umc_bus_unbind_helper(struct device *dev, void *data) | 14 | static int umc_bus_pre_reset_helper(struct device *dev, void *data) |
15 | { | 15 | { |
16 | struct device *parent = data; | 16 | int ret = 0; |
17 | 17 | ||
18 | if (dev->parent == parent && dev->driver) | 18 | if (dev->driver) { |
19 | device_release_driver(dev); | 19 | struct umc_dev *umc = to_umc_dev(dev); |
20 | return 0; | 20 | struct umc_driver *umc_drv = to_umc_driver(dev->driver); |
21 | |||
22 | if (umc_drv->pre_reset) | ||
23 | ret = umc_drv->pre_reset(umc); | ||
24 | else | ||
25 | device_release_driver(dev); | ||
26 | } | ||
27 | return ret; | ||
28 | } | ||
29 | |||
30 | static int umc_bus_post_reset_helper(struct device *dev, void *data) | ||
31 | { | ||
32 | int ret = 0; | ||
33 | |||
34 | if (dev->driver) { | ||
35 | struct umc_dev *umc = to_umc_dev(dev); | ||
36 | struct umc_driver *umc_drv = to_umc_driver(dev->driver); | ||
37 | |||
38 | if (umc_drv->post_reset) | ||
39 | ret = umc_drv->post_reset(umc); | ||
40 | } else | ||
41 | ret = device_attach(dev); | ||
42 | |||
43 | return ret; | ||
21 | } | 44 | } |
22 | 45 | ||
23 | /** | 46 | /** |
24 | * umc_controller_reset - reset the whole UMC controller | 47 | * umc_controller_reset - reset the whole UMC controller |
25 | * @umc: the UMC device for the radio controller. | 48 | * @umc: the UMC device for the radio controller. |
26 | * | 49 | * |
27 | * Drivers will be unbound from all UMC devices belonging to the | 50 | * Drivers or all capabilities of the controller will have their |
28 | * controller and then the radio controller will be rebound. The | 51 | * pre_reset methods called or be unbound from their device. Then all |
29 | * radio controller is expected to do a full hardware reset when it is | 52 | * post_reset methods will be called or the drivers will be rebound. |
30 | * probed. | 53 | * |
54 | * Radio controllers must provide pre_reset and post_reset methods and | ||
55 | * reset the hardware in their start method. | ||
31 | * | 56 | * |
32 | * If this is called while a probe() or remove() is in progress it | 57 | * If this is called while a probe() or remove() is in progress it |
33 | * will return -EAGAIN and not perform the reset. | 58 | * will return -EAGAIN and not perform the reset. |
@@ -35,14 +60,13 @@ static int umc_bus_unbind_helper(struct device *dev, void *data) | |||
35 | int umc_controller_reset(struct umc_dev *umc) | 60 | int umc_controller_reset(struct umc_dev *umc) |
36 | { | 61 | { |
37 | struct device *parent = umc->dev.parent; | 62 | struct device *parent = umc->dev.parent; |
38 | int ret; | 63 | int ret = 0; |
39 | 64 | ||
40 | if (down_trylock(&parent->sem)) | 65 | if (down_trylock(&parent->sem)) |
41 | return -EAGAIN; | 66 | return -EAGAIN; |
42 | bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper); | 67 | ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper); |
43 | ret = device_attach(&umc->dev); | 68 | if (ret >= 0) |
44 | if (ret == 1) | 69 | device_for_each_child(parent, parent, umc_bus_post_reset_helper); |
45 | ret = 0; | ||
46 | up(&parent->sem); | 70 | up(&parent->sem); |
47 | 71 | ||
48 | return ret; | 72 | return ret; |
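
umc_controller_reset() now makes two passes over the children of the radio controller's parent: the first gives each bound driver a chance to quiesce through pre_reset() (falling back to unbinding it), the second restarts them through post_reset() (falling back to device_attach() for unbound devices). A generic sketch of that two-pass walk; the my_* names and the drvdata-based ops lookup are assumptions, not the UMC bus code:

#include <linux/device.h>

struct my_reset_ops {                   /* hypothetical per-driver hooks */
        int (*pre_reset)(struct device *dev);
        int (*post_reset)(struct device *dev);
};

static int my_pre_reset_one(struct device *dev, void *data)
{
        const struct my_reset_ops *ops = dev_get_drvdata(dev); /* assumed lookup */

        if (!dev->driver)
                return 0;
        if (ops && ops->pre_reset)
                return ops->pre_reset(dev);
        device_release_driver(dev);     /* no hook: unbind now, rebind after the reset */
        return 0;
}

static int my_post_reset_one(struct device *dev, void *data)
{
        const struct my_reset_ops *ops = dev_get_drvdata(dev); /* assumed lookup */

        if (dev->driver)
                return (ops && ops->post_reset) ? ops->post_reset(dev) : 0;
        return device_attach(dev);      /* was unbound before the reset */
}

static int my_reset_children(struct device *parent)
{
        int ret;

        ret = device_for_each_child(parent, NULL, my_pre_reset_one);
        if (ret >= 0)
                device_for_each_child(parent, NULL, my_post_reset_one);
        return ret;
}
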
@@ -75,10 +99,10 @@ static int umc_bus_rescan_helper(struct device *dev, void *data) | |||
75 | if (!dev->driver) | 99 | if (!dev->driver) |
76 | ret = device_attach(dev); | 100 | ret = device_attach(dev); |
77 | 101 | ||
78 | return ret < 0 ? ret : 0; | 102 | return ret; |
79 | } | 103 | } |
80 | 104 | ||
81 | static void umc_bus_rescan(void) | 105 | static void umc_bus_rescan(struct device *parent) |
82 | { | 106 | { |
83 | int err; | 107 | int err; |
84 | 108 | ||
@@ -86,7 +110,7 @@ static void umc_bus_rescan(void) | |||
86 | * We can't use bus_rescan_devices() here as it deadlocks when | 110 | * We can't use bus_rescan_devices() here as it deadlocks when |
87 | * it tries to retake the dev->parent semaphore. | 111 | * it tries to retake the dev->parent semaphore. |
88 | */ | 112 | */ |
89 | err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper); | 113 | err = device_for_each_child(parent, NULL, umc_bus_rescan_helper); |
90 | if (err < 0) | 114 | if (err < 0) |
91 | printk(KERN_WARNING "%s: rescan of bus failed: %d\n", | 115 | printk(KERN_WARNING "%s: rescan of bus failed: %d\n", |
92 | KBUILD_MODNAME, err); | 116 | KBUILD_MODNAME, err); |
@@ -120,7 +144,7 @@ static int umc_device_probe(struct device *dev) | |||
120 | if (err) | 144 | if (err) |
121 | put_device(dev); | 145 | put_device(dev); |
122 | else | 146 | else |
123 | umc_bus_rescan(); | 147 | umc_bus_rescan(dev->parent); |
124 | 148 | ||
125 | return err; | 149 | return err; |
126 | } | 150 | } |
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c index aa44e1c1a102..1fc7d8270bb8 100644 --- a/drivers/uwb/umc-dev.c +++ b/drivers/uwb/umc-dev.c | |||
@@ -7,8 +7,6 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/uwb/umc.h> | 9 | #include <linux/uwb/umc.h> |
10 | #define D_LOCAL 0 | ||
11 | #include <linux/uwb/debug.h> | ||
12 | 10 | ||
13 | static void umc_device_release(struct device *dev) | 11 | static void umc_device_release(struct device *dev) |
14 | { | 12 | { |
@@ -31,8 +29,7 @@ struct umc_dev *umc_device_create(struct device *parent, int n) | |||
31 | 29 | ||
32 | umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); | 30 | umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); |
33 | if (umc) { | 31 | if (umc) { |
34 | snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d", | 32 | dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n); |
35 | parent->bus_id, n); | ||
36 | umc->dev.parent = parent; | 33 | umc->dev.parent = parent; |
37 | umc->dev.bus = &umc_bus_type; | 34 | umc->dev.bus = &umc_bus_type; |
38 | umc->dev.release = umc_device_release; | 35 | umc->dev.release = umc_device_release; |
@@ -54,8 +51,6 @@ int umc_device_register(struct umc_dev *umc) | |||
54 | { | 51 | { |
55 | int err; | 52 | int err; |
56 | 53 | ||
57 | d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc); | ||
58 | |||
59 | err = request_resource(umc->resource.parent, &umc->resource); | 54 | err = request_resource(umc->resource.parent, &umc->resource); |
60 | if (err < 0) { | 55 | if (err < 0) { |
61 | dev_err(&umc->dev, "can't allocate resource range " | 56 | dev_err(&umc->dev, "can't allocate resource range " |
@@ -69,13 +64,11 @@ int umc_device_register(struct umc_dev *umc) | |||
69 | err = device_register(&umc->dev); | 64 | err = device_register(&umc->dev); |
70 | if (err < 0) | 65 | if (err < 0) |
71 | goto error_device_register; | 66 | goto error_device_register; |
72 | d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc); | ||
73 | return 0; | 67 | return 0; |
74 | 68 | ||
75 | error_device_register: | 69 | error_device_register: |
76 | release_resource(&umc->resource); | 70 | release_resource(&umc->resource); |
77 | error_request_resource: | 71 | error_request_resource: |
78 | d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err); | ||
79 | return err; | 72 | return err; |
80 | } | 73 | } |
81 | EXPORT_SYMBOL_GPL(umc_device_register); | 74 | EXPORT_SYMBOL_GPL(umc_device_register); |
@@ -95,10 +88,8 @@ void umc_device_unregister(struct umc_dev *umc) | |||
95 | if (!umc) | 88 | if (!umc) |
96 | return; | 89 | return; |
97 | dev = get_device(&umc->dev); | 90 | dev = get_device(&umc->dev); |
98 | d_fnstart(3, dev, "(umc_dev %p)\n", umc); | ||
99 | device_unregister(&umc->dev); | 91 | device_unregister(&umc->dev); |
100 | release_resource(&umc->resource); | 92 | release_resource(&umc->resource); |
101 | d_fnend(3, dev, "(umc_dev %p) = void\n", umc); | ||
102 | put_device(dev); | 93 | put_device(dev); |
103 | } | 94 | } |
104 | EXPORT_SYMBOL_GPL(umc_device_unregister); | 95 | EXPORT_SYMBOL_GPL(umc_device_unregister); |
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c index 6d232c35d07d..4a42993700c1 100644 --- a/drivers/uwb/uwb-debug.c +++ b/drivers/uwb/uwb-debug.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * | 4 | * |
5 | * Copyright (C) 2005-2006 Intel Corporation | 5 | * Copyright (C) 2005-2006 Intel Corporation |
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | 6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> |
7 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License version | 10 | * modify it under the terms of the GNU General Public License version |
@@ -33,31 +34,9 @@ | |||
33 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
34 | 35 | ||
35 | #include <linux/uwb/debug-cmd.h> | 36 | #include <linux/uwb/debug-cmd.h> |
36 | #define D_LOCAL 0 | ||
37 | #include <linux/uwb/debug.h> | ||
38 | 37 | ||
39 | #include "uwb-internal.h" | 38 | #include "uwb-internal.h" |
40 | 39 | ||
41 | void dump_bytes(struct device *dev, const void *_buf, size_t rsize) | ||
42 | { | ||
43 | const char *buf = _buf; | ||
44 | char line[32]; | ||
45 | size_t offset = 0; | ||
46 | int cnt, cnt2; | ||
47 | for (cnt = 0; cnt < rsize; cnt += 8) { | ||
48 | size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8; | ||
49 | for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) { | ||
50 | offset += scnprintf(line + offset, sizeof(line) - offset, | ||
51 | "%02x ", buf[cnt + cnt2] & 0xff); | ||
52 | } | ||
53 | if (dev) | ||
54 | dev_info(dev, "%s\n", line); | ||
55 | else | ||
56 | printk(KERN_INFO "%s\n", line); | ||
57 | } | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(dump_bytes); | ||
60 | |||
61 | /* | 40 | /* |
62 | * Debug interface | 41 | * Debug interface |
63 | * | 42 | * |
@@ -84,26 +63,23 @@ struct uwb_dbg { | |||
84 | struct dentry *reservations_f; | 63 | struct dentry *reservations_f; |
85 | struct dentry *accept_f; | 64 | struct dentry *accept_f; |
86 | struct dentry *drp_avail_f; | 65 | struct dentry *drp_avail_f; |
66 | spinlock_t list_lock; | ||
87 | }; | 67 | }; |
88 | 68 | ||
89 | static struct dentry *root_dir; | 69 | static struct dentry *root_dir; |
90 | 70 | ||
91 | static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) | 71 | static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) |
92 | { | 72 | { |
93 | struct uwb_rc *rc = rsv->rc; | 73 | struct uwb_dbg *dbg = rsv->pal_priv; |
94 | struct device *dev = &rc->uwb_dev.dev; | ||
95 | struct uwb_dev_addr devaddr; | ||
96 | char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; | ||
97 | |||
98 | uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); | ||
99 | if (rsv->target.type == UWB_RSV_TARGET_DEV) | ||
100 | devaddr = rsv->target.dev->dev_addr; | ||
101 | else | ||
102 | devaddr = rsv->target.devaddr; | ||
103 | uwb_dev_addr_print(target, sizeof(target), &devaddr); | ||
104 | 74 | ||
105 | dev_dbg(dev, "debug: rsv %s -> %s: %s\n", | 75 | uwb_rsv_dump("debug", rsv); |
106 | owner, target, uwb_rsv_state_str(rsv->state)); | 76 | |
77 | if (rsv->state == UWB_RSV_STATE_NONE) { | ||
78 | spin_lock(&dbg->list_lock); | ||
79 | list_del(&rsv->pal_node); | ||
80 | spin_unlock(&dbg->list_lock); | ||
81 | uwb_rsv_destroy(rsv); | ||
82 | } | ||
107 | } | 83 | } |
108 | 84 | ||
109 | static int cmd_rsv_establish(struct uwb_rc *rc, | 85 | static int cmd_rsv_establish(struct uwb_rc *rc, |
@@ -119,26 +95,27 @@ static int cmd_rsv_establish(struct uwb_rc *rc, | |||
119 | if (target == NULL) | 95 | if (target == NULL) |
120 | return -ENODEV; | 96 | return -ENODEV; |
121 | 97 | ||
122 | rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL); | 98 | rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, rc->dbg); |
123 | if (rsv == NULL) { | 99 | if (rsv == NULL) { |
124 | uwb_dev_put(target); | 100 | uwb_dev_put(target); |
125 | return -ENOMEM; | 101 | return -ENOMEM; |
126 | } | 102 | } |
127 | 103 | ||
128 | rsv->owner = &rc->uwb_dev; | 104 | rsv->target.type = UWB_RSV_TARGET_DEV; |
129 | rsv->target.type = UWB_RSV_TARGET_DEV; | 105 | rsv->target.dev = target; |
130 | rsv->target.dev = target; | 106 | rsv->type = cmd->type; |
131 | rsv->type = cmd->type; | 107 | rsv->max_mas = cmd->max_mas; |
132 | rsv->max_mas = cmd->max_mas; | 108 | rsv->min_mas = cmd->min_mas; |
133 | rsv->min_mas = cmd->min_mas; | 109 | rsv->max_interval = cmd->max_interval; |
134 | rsv->sparsity = cmd->sparsity; | ||
135 | 110 | ||
136 | ret = uwb_rsv_establish(rsv); | 111 | ret = uwb_rsv_establish(rsv); |
137 | if (ret) | 112 | if (ret) |
138 | uwb_rsv_destroy(rsv); | 113 | uwb_rsv_destroy(rsv); |
139 | else | 114 | else { |
115 | spin_lock(&(rc->dbg)->list_lock); | ||
140 | list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); | 116 | list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); |
141 | 117 | spin_unlock(&(rc->dbg)->list_lock); | |
118 | } | ||
142 | return ret; | 119 | return ret; |
143 | } | 120 | } |
144 | 121 | ||
@@ -148,21 +125,40 @@ static int cmd_rsv_terminate(struct uwb_rc *rc, | |||
148 | struct uwb_rsv *rsv, *found = NULL; | 125 | struct uwb_rsv *rsv, *found = NULL; |
149 | int i = 0; | 126 | int i = 0; |
150 | 127 | ||
128 | spin_lock(&(rc->dbg)->list_lock); | ||
129 | |||
151 | list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { | 130 | list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { |
152 | if (i == cmd->index) { | 131 | if (i == cmd->index) { |
153 | found = rsv; | 132 | found = rsv; |
133 | uwb_rsv_get(found); | ||
154 | break; | 134 | break; |
155 | } | 135 | } |
136 | i++; | ||
156 | } | 137 | } |
138 | |||
139 | spin_unlock(&(rc->dbg)->list_lock); | ||
140 | |||
157 | if (!found) | 141 | if (!found) |
158 | return -EINVAL; | 142 | return -EINVAL; |
159 | 143 | ||
160 | list_del(&found->pal_node); | ||
161 | uwb_rsv_terminate(found); | 144 | uwb_rsv_terminate(found); |
145 | uwb_rsv_put(found); | ||
162 | 146 | ||
163 | return 0; | 147 | return 0; |
164 | } | 148 | } |
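
cmd_rsv_terminate() now pins the reservation with uwb_rsv_get() while the list lock is still held, drops the lock, terminates, and finally drops the reference with uwb_rsv_put(); the list_del() moved into the debug callback, which removes the entry once the reservation reaches UWB_RSV_STATE_NONE. The lookup-and-pin idiom in generic form, assuming hypothetical my_* names and a plain kref:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_item {
        struct list_head node;
        struct kref refcnt;
        int index;
};

static void my_item_release(struct kref *kref)
{
        kfree(container_of(kref, struct my_item, refcnt));
}

static struct my_item *my_find_and_get(spinlock_t *lock,
                                       struct list_head *head, int index)
{
        struct my_item *it, *found = NULL;

        spin_lock(lock);
        list_for_each_entry(it, head, node) {
                if (it->index == index) {
                        kref_get(&it->refcnt);  /* pin before dropping the lock */
                        found = it;
                        break;
                }
        }
        spin_unlock(lock);
        return found;   /* caller works on it, then kref_put(&it->refcnt, my_item_release) */
}
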
165 | 149 | ||
150 | static int cmd_ie_add(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_add) | ||
151 | { | ||
152 | return uwb_rc_ie_add(rc, | ||
153 | (const struct uwb_ie_hdr *) ie_to_add->data, | ||
154 | ie_to_add->len); | ||
155 | } | ||
156 | |||
157 | static int cmd_ie_rm(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_rm) | ||
158 | { | ||
159 | return uwb_rc_ie_rm(rc, ie_to_rm->data[0]); | ||
160 | } | ||
161 | |||
166 | static int command_open(struct inode *inode, struct file *file) | 162 | static int command_open(struct inode *inode, struct file *file) |
167 | { | 163 | { |
168 | file->private_data = inode->i_private; | 164 | file->private_data = inode->i_private; |
@@ -175,8 +171,8 @@ static ssize_t command_write(struct file *file, const char __user *buf, | |||
175 | { | 171 | { |
176 | struct uwb_rc *rc = file->private_data; | 172 | struct uwb_rc *rc = file->private_data; |
177 | struct uwb_dbg_cmd cmd; | 173 | struct uwb_dbg_cmd cmd; |
178 | int ret; | 174 | int ret = 0; |
179 | 175 | ||
180 | if (len != sizeof(struct uwb_dbg_cmd)) | 176 | if (len != sizeof(struct uwb_dbg_cmd)) |
181 | return -EINVAL; | 177 | return -EINVAL; |
182 | 178 | ||
@@ -190,6 +186,18 @@ static ssize_t command_write(struct file *file, const char __user *buf, | |||
190 | case UWB_DBG_CMD_RSV_TERMINATE: | 186 | case UWB_DBG_CMD_RSV_TERMINATE: |
191 | ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); | 187 | ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); |
192 | break; | 188 | break; |
189 | case UWB_DBG_CMD_IE_ADD: | ||
190 | ret = cmd_ie_add(rc, &cmd.ie_add); | ||
191 | break; | ||
192 | case UWB_DBG_CMD_IE_RM: | ||
193 | ret = cmd_ie_rm(rc, &cmd.ie_rm); | ||
194 | break; | ||
195 | case UWB_DBG_CMD_RADIO_START: | ||
196 | ret = uwb_radio_start(&rc->dbg->pal); | ||
197 | break; | ||
198 | case UWB_DBG_CMD_RADIO_STOP: | ||
199 | uwb_radio_stop(&rc->dbg->pal); | ||
200 | break; | ||
193 | default: | 201 | default: |
194 | return -EINVAL; | 202 | return -EINVAL; |
195 | } | 203 | } |
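
command_write() keeps its shape while gaining the IE add/remove and radio start/stop cases: exactly one fixed-size struct uwb_dbg_cmd per write, dispatched on its type field. A self-contained sketch of the same debugfs binary-command pattern, with a hypothetical my_cmd structure standing in for struct uwb_dbg_cmd:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_cmd {                         /* hypothetical command block */
        u32 type;
        u32 arg;
};

enum { MY_CMD_START = 1, MY_CMD_STOP = 2 };

static ssize_t my_cmd_write(struct file *file, const char __user *buf,
                            size_t len, loff_t *off)
{
        struct my_cmd cmd;
        int ret = 0;

        if (len != sizeof(cmd))         /* one whole command per write() */
                return -EINVAL;
        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        switch (cmd.type) {
        case MY_CMD_START:
                /* ... */
                break;
        case MY_CMD_STOP:
                /* ... */
                break;
        default:
                return -EINVAL;
        }

        return ret < 0 ? ret : len;
}
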
@@ -283,12 +291,26 @@ static struct file_operations drp_avail_fops = { | |||
283 | .owner = THIS_MODULE, | 291 | .owner = THIS_MODULE, |
284 | }; | 292 | }; |
285 | 293 | ||
286 | static void uwb_dbg_new_rsv(struct uwb_rsv *rsv) | 294 | static void uwb_dbg_channel_changed(struct uwb_pal *pal, int channel) |
295 | { | ||
296 | struct device *dev = &pal->rc->uwb_dev.dev; | ||
297 | |||
298 | if (channel > 0) | ||
299 | dev_info(dev, "debug: channel %d started\n", channel); | ||
300 | else | ||
301 | dev_info(dev, "debug: channel stopped\n"); | ||
302 | } | ||
303 | |||
304 | static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv) | ||
287 | { | 305 | { |
288 | struct uwb_rc *rc = rsv->rc; | 306 | struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal); |
289 | 307 | ||
290 | if (rc->dbg->accept) | 308 | if (dbg->accept) { |
291 | uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL); | 309 | spin_lock(&dbg->list_lock); |
310 | list_add_tail(&rsv->pal_node, &dbg->rsvs); | ||
311 | spin_unlock(&dbg->list_lock); | ||
312 | uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg); | ||
313 | } | ||
292 | } | 314 | } |
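
With the reworked PAL interface, the debug PAL supplies two callbacks: channel_changed(), reporting when the radio starts or stops beaconing on a channel, and new_rsv(), offering incoming reservation requests to the PAL; both are hooked up a little further down in uwb_dbg_add_rc(). A minimal registration sketch along the same lines; the my_* names are placeholders, while the uwb_pal fields are the ones this patch uses:

#include <linux/uwb.h>

static void my_pal_channel_changed(struct uwb_pal *pal, int channel)
{
        /* channel > 0: radio started on that channel; otherwise it stopped */
}

static void my_pal_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
{
        /* a peer is asking for a reservation targeting us; accept it with
         * uwb_rsv_accept(rsv, callback, priv) if the PAL wants it */
}

static void my_pal_setup(struct uwb_rc *rc, struct uwb_pal *pal)
{
        uwb_pal_init(pal);
        pal->rc = rc;
        pal->channel_changed = my_pal_channel_changed;
        pal->new_rsv = my_pal_new_rsv;
        uwb_pal_register(pal);
}
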
293 | 315 | ||
294 | /** | 316 | /** |
@@ -302,10 +324,14 @@ void uwb_dbg_add_rc(struct uwb_rc *rc) | |||
302 | return; | 324 | return; |
303 | 325 | ||
304 | INIT_LIST_HEAD(&rc->dbg->rsvs); | 326 | INIT_LIST_HEAD(&rc->dbg->rsvs); |
327 | spin_lock_init(&(rc->dbg)->list_lock); | ||
305 | 328 | ||
306 | uwb_pal_init(&rc->dbg->pal); | 329 | uwb_pal_init(&rc->dbg->pal); |
330 | rc->dbg->pal.rc = rc; | ||
331 | rc->dbg->pal.channel_changed = uwb_dbg_channel_changed; | ||
307 | rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; | 332 | rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; |
308 | uwb_pal_register(rc, &rc->dbg->pal); | 333 | uwb_pal_register(&rc->dbg->pal); |
334 | |||
309 | if (root_dir) { | 335 | if (root_dir) { |
310 | rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), | 336 | rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), |
311 | root_dir); | 337 | root_dir); |
@@ -325,7 +351,7 @@ void uwb_dbg_add_rc(struct uwb_rc *rc) | |||
325 | } | 351 | } |
326 | 352 | ||
327 | /** | 353 | /** |
328 | * uwb_dbg_add_rc - remove a radio controller's debug interface | 354 | * uwb_dbg_del_rc - remove a radio controller's debug interface |
329 | * @rc: the radio controller | 355 | * @rc: the radio controller |
330 | */ | 356 | */ |
331 | void uwb_dbg_del_rc(struct uwb_rc *rc) | 357 | void uwb_dbg_del_rc(struct uwb_rc *rc) |
@@ -336,10 +362,10 @@ void uwb_dbg_del_rc(struct uwb_rc *rc) | |||
336 | return; | 362 | return; |
337 | 363 | ||
338 | list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { | 364 | list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { |
339 | uwb_rsv_destroy(rsv); | 365 | uwb_rsv_terminate(rsv); |
340 | } | 366 | } |
341 | 367 | ||
342 | uwb_pal_unregister(rc, &rc->dbg->pal); | 368 | uwb_pal_unregister(&rc->dbg->pal); |
343 | 369 | ||
344 | if (root_dir) { | 370 | if (root_dir) { |
345 | debugfs_remove(rc->dbg->drp_avail_f); | 371 | debugfs_remove(rc->dbg->drp_avail_f); |
@@ -365,3 +391,16 @@ void uwb_dbg_exit(void) | |||
365 | { | 391 | { |
366 | debugfs_remove(root_dir); | 392 | debugfs_remove(root_dir); |
367 | } | 393 | } |
394 | |||
395 | /** | ||
396 | * uwb_dbg_create_pal_dir - create a debugfs directory for a PAL | ||
397 | * @pal: The PAL. | ||
398 | */ | ||
399 | struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal) | ||
400 | { | ||
401 | struct uwb_rc *rc = pal->rc; | ||
402 | |||
403 | if (root_dir && rc->dbg && rc->dbg->root_d && pal->name) | ||
404 | return debugfs_create_dir(pal->name, rc->dbg->root_d); | ||
405 | return NULL; | ||
406 | } | ||
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h index 2ad307d12961..d5bcfc1c227a 100644 --- a/drivers/uwb/uwb-internal.h +++ b/drivers/uwb/uwb-internal.h | |||
@@ -66,14 +66,14 @@ extern int uwb_rc_scan(struct uwb_rc *rc, | |||
66 | unsigned channel, enum uwb_scan_type type, | 66 | unsigned channel, enum uwb_scan_type type, |
67 | unsigned bpst_offset); | 67 | unsigned bpst_offset); |
68 | extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); | 68 | extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); |
69 | extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t); | 69 | |
70 | extern void uwb_rc_ie_init(struct uwb_rc *); | 70 | void uwb_rc_ie_init(struct uwb_rc *); |
71 | extern void uwb_rc_ie_init(struct uwb_rc *); | 71 | int uwb_rc_ie_setup(struct uwb_rc *); |
72 | extern ssize_t uwb_rc_ie_setup(struct uwb_rc *); | 72 | void uwb_rc_ie_release(struct uwb_rc *); |
73 | extern void uwb_rc_ie_release(struct uwb_rc *); | 73 | int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, |
74 | extern int uwb_rc_ie_add(struct uwb_rc *, | 74 | char *buf, size_t size); |
75 | const struct uwb_ie_hdr *, size_t); | 75 | int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); |
76 | extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); | 76 | |
77 | 77 | ||
78 | extern const char *uwb_rc_strerror(unsigned code); | 78 | extern const char *uwb_rc_strerror(unsigned code); |
79 | 79 | ||
@@ -92,6 +92,12 @@ extern const char *uwb_rc_strerror(unsigned code); | |||
92 | 92 | ||
93 | struct uwb_rc_neh; | 93 | struct uwb_rc_neh; |
94 | 94 | ||
95 | extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, | ||
96 | struct uwb_rccb *cmd, size_t cmd_size, | ||
97 | u8 expected_type, u16 expected_event, | ||
98 | uwb_rc_cmd_cb_f cb, void *arg); | ||
99 | |||
100 | |||
95 | void uwb_rc_neh_create(struct uwb_rc *rc); | 101 | void uwb_rc_neh_create(struct uwb_rc *rc); |
96 | void uwb_rc_neh_destroy(struct uwb_rc *rc); | 102 | void uwb_rc_neh_destroy(struct uwb_rc *rc); |
97 | 103 | ||
@@ -106,7 +112,69 @@ void uwb_rc_neh_put(struct uwb_rc_neh *neh); | |||
106 | extern int uwb_est_create(void); | 112 | extern int uwb_est_create(void); |
107 | extern void uwb_est_destroy(void); | 113 | extern void uwb_est_destroy(void); |
108 | 114 | ||
115 | /* | ||
116 | * UWB conflicting alien reservations | ||
117 | */ | ||
118 | struct uwb_cnflt_alien { | ||
119 | struct uwb_rc *rc; | ||
120 | struct list_head rc_node; | ||
121 | struct uwb_mas_bm mas; | ||
122 | struct timer_list timer; | ||
123 | struct work_struct cnflt_update_work; | ||
124 | }; | ||
125 | |||
126 | enum uwb_uwb_rsv_alloc_result { | ||
127 | UWB_RSV_ALLOC_FOUND = 0, | ||
128 | UWB_RSV_ALLOC_NOT_FOUND, | ||
129 | }; | ||
130 | |||
131 | enum uwb_rsv_mas_status { | ||
132 | UWB_RSV_MAS_NOT_AVAIL = 1, | ||
133 | UWB_RSV_MAS_SAFE, | ||
134 | UWB_RSV_MAS_UNSAFE, | ||
135 | }; | ||
136 | |||
137 | struct uwb_rsv_col_set_info { | ||
138 | unsigned char start_col; | ||
139 | unsigned char interval; | ||
140 | unsigned char safe_mas_per_col; | ||
141 | unsigned char unsafe_mas_per_col; | ||
142 | }; | ||
143 | |||
144 | struct uwb_rsv_col_info { | ||
145 | unsigned char max_avail_safe; | ||
146 | unsigned char max_avail_unsafe; | ||
147 | unsigned char highest_mas[UWB_MAS_PER_ZONE]; | ||
148 | struct uwb_rsv_col_set_info csi; | ||
149 | }; | ||
150 | |||
151 | struct uwb_rsv_row_info { | ||
152 | unsigned char avail[UWB_MAS_PER_ZONE]; | ||
153 | unsigned char free_rows; | ||
154 | unsigned char used_rows; | ||
155 | }; | ||
156 | |||
157 | /* | ||
158 | * UWB find allocation | ||
159 | */ | ||
160 | struct uwb_rsv_alloc_info { | ||
161 | unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES]; | ||
162 | struct uwb_rsv_col_info ci[UWB_NUM_ZONES]; | ||
163 | struct uwb_rsv_row_info ri; | ||
164 | struct uwb_mas_bm *not_available; | ||
165 | struct uwb_mas_bm *result; | ||
166 | int min_mas; | ||
167 | int max_mas; | ||
168 | int max_interval; | ||
169 | int total_allocated_mases; | ||
170 | int safe_allocated_mases; | ||
171 | int unsafe_allocated_mases; | ||
172 | int interval; | ||
173 | }; | ||
109 | 174 | ||
175 | int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, | ||
176 | struct uwb_mas_bm *result); | ||
177 | void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc); | ||
110 | /* | 178 | /* |
111 | * UWB Events & management daemon | 179 | * UWB Events & management daemon |
112 | */ | 180 | */ |
@@ -160,13 +228,14 @@ struct uwb_event { | |||
160 | }; | 228 | }; |
161 | }; | 229 | }; |
162 | 230 | ||
163 | extern void uwbd_start(void); | 231 | extern void uwbd_start(struct uwb_rc *rc); |
164 | extern void uwbd_stop(void); | 232 | extern void uwbd_stop(struct uwb_rc *rc); |
165 | extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); | 233 | extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); |
166 | extern void uwbd_event_queue(struct uwb_event *); | 234 | extern void uwbd_event_queue(struct uwb_event *); |
167 | void uwbd_flush(struct uwb_rc *rc); | 235 | void uwbd_flush(struct uwb_rc *rc); |
168 | 236 | ||
169 | /* UWB event handlers */ | 237 | /* UWB event handlers */ |
238 | extern int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *); | ||
170 | extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); | 239 | extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); |
171 | extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); | 240 | extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); |
172 | extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); | 241 | extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); |
@@ -193,15 +262,6 @@ int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt); | |||
193 | 262 | ||
194 | extern unsigned long beacon_timeout_ms; | 263 | extern unsigned long beacon_timeout_ms; |
195 | 264 | ||
196 | /** Beacon cache list */ | ||
197 | struct uwb_beca { | ||
198 | struct list_head list; | ||
199 | size_t entries; | ||
200 | struct mutex mutex; | ||
201 | }; | ||
202 | |||
203 | extern struct uwb_beca uwb_beca; | ||
204 | |||
205 | /** | 265 | /** |
206 | * Beacon cache entry | 266 | * Beacon cache entry |
207 | * | 267 | * |
@@ -228,9 +288,6 @@ struct uwb_beca_e { | |||
228 | struct uwb_beacon_frame; | 288 | struct uwb_beacon_frame; |
229 | extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, | 289 | extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, |
230 | char *, size_t); | 290 | char *, size_t); |
231 | extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *, | ||
232 | struct uwb_beacon_frame *, | ||
233 | unsigned long); | ||
234 | 291 | ||
235 | extern void uwb_bce_kfree(struct kref *_bce); | 292 | extern void uwb_bce_kfree(struct kref *_bce); |
236 | static inline void uwb_bce_get(struct uwb_beca_e *bce) | 293 | static inline void uwb_bce_get(struct uwb_beca_e *bce) |
@@ -241,14 +298,19 @@ static inline void uwb_bce_put(struct uwb_beca_e *bce) | |||
241 | { | 298 | { |
242 | kref_put(&bce->refcnt, uwb_bce_kfree); | 299 | kref_put(&bce->refcnt, uwb_bce_kfree); |
243 | } | 300 | } |
244 | extern void uwb_beca_purge(void); | 301 | extern void uwb_beca_purge(struct uwb_rc *rc); |
245 | extern void uwb_beca_release(void); | 302 | extern void uwb_beca_release(struct uwb_rc *rc); |
246 | 303 | ||
247 | struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, | 304 | struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, |
248 | const struct uwb_dev_addr *devaddr); | 305 | const struct uwb_dev_addr *devaddr); |
249 | struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, | 306 | struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, |
250 | const struct uwb_mac_addr *macaddr); | 307 | const struct uwb_mac_addr *macaddr); |
251 | 308 | ||
309 | int uwb_radio_setup(struct uwb_rc *rc); | ||
310 | void uwb_radio_reset_state(struct uwb_rc *rc); | ||
311 | void uwb_radio_shutdown(struct uwb_rc *rc); | ||
312 | int uwb_radio_force_channel(struct uwb_rc *rc, int channel); | ||
313 | |||
252 | /* -- UWB Sysfs representation */ | 314 | /* -- UWB Sysfs representation */ |
253 | extern struct class uwb_rc_class; | 315 | extern struct class uwb_rc_class; |
254 | extern struct device_attribute dev_attr_mac_address; | 316 | extern struct device_attribute dev_attr_mac_address; |
@@ -259,18 +321,29 @@ extern struct device_attribute dev_attr_scan; | |||
259 | void uwb_rsv_init(struct uwb_rc *rc); | 321 | void uwb_rsv_init(struct uwb_rc *rc); |
260 | int uwb_rsv_setup(struct uwb_rc *rc); | 322 | int uwb_rsv_setup(struct uwb_rc *rc); |
261 | void uwb_rsv_cleanup(struct uwb_rc *rc); | 323 | void uwb_rsv_cleanup(struct uwb_rc *rc); |
324 | void uwb_rsv_remove_all(struct uwb_rc *rc); | ||
325 | void uwb_rsv_get(struct uwb_rsv *rsv); | ||
326 | void uwb_rsv_put(struct uwb_rsv *rsv); | ||
327 | bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv); | ||
328 | void uwb_rsv_dump(char *text, struct uwb_rsv *rsv); | ||
329 | int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available); | ||
330 | void uwb_rsv_backoff_win_timer(unsigned long arg); | ||
331 | void uwb_rsv_backoff_win_increment(struct uwb_rc *rc); | ||
332 | int uwb_rsv_status(struct uwb_rsv *rsv); | ||
333 | int uwb_rsv_companion_status(struct uwb_rsv *rsv); | ||
262 | 334 | ||
263 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); | 335 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); |
264 | void uwb_rsv_remove(struct uwb_rsv *rsv); | 336 | void uwb_rsv_remove(struct uwb_rsv *rsv); |
265 | struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, | 337 | struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, |
266 | struct uwb_ie_drp *drp_ie); | 338 | struct uwb_ie_drp *drp_ie); |
267 | void uwb_rsv_sched_update(struct uwb_rc *rc); | 339 | void uwb_rsv_sched_update(struct uwb_rc *rc); |
340 | void uwb_rsv_queue_update(struct uwb_rc *rc); | ||
268 | 341 | ||
269 | void uwb_drp_handle_timeout(struct uwb_rsv *rsv); | ||
270 | int uwb_drp_ie_update(struct uwb_rsv *rsv); | 342 | int uwb_drp_ie_update(struct uwb_rsv *rsv); |
271 | void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); | 343 | void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); |
272 | 344 | ||
273 | void uwb_drp_avail_init(struct uwb_rc *rc); | 345 | void uwb_drp_avail_init(struct uwb_rc *rc); |
346 | void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail); | ||
274 | int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); | 347 | int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); |
275 | void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); | 348 | void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); |
276 | void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); | 349 | void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); |
@@ -289,8 +362,7 @@ void uwb_dbg_init(void); | |||
289 | void uwb_dbg_exit(void); | 362 | void uwb_dbg_exit(void); |
290 | void uwb_dbg_add_rc(struct uwb_rc *rc); | 363 | void uwb_dbg_add_rc(struct uwb_rc *rc); |
291 | void uwb_dbg_del_rc(struct uwb_rc *rc); | 364 | void uwb_dbg_del_rc(struct uwb_rc *rc); |
292 | 365 | struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal); | |
293 | /* Workarounds for version specific stuff */ | ||
294 | 366 | ||
295 | static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) | 367 | static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) |
296 | { | 368 | { |
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c index 78908416e42c..57bd6bfef37e 100644 --- a/drivers/uwb/uwbd.c +++ b/drivers/uwb/uwbd.c | |||
@@ -68,17 +68,13 @@ | |||
68 | * | 68 | * |
69 | * Handler functions are called normally uwbd_evt_handle_*(). | 69 | * Handler functions are called normally uwbd_evt_handle_*(). |
70 | */ | 70 | */ |
71 | |||
72 | #include <linux/kthread.h> | 71 | #include <linux/kthread.h> |
73 | #include <linux/module.h> | 72 | #include <linux/module.h> |
74 | #include <linux/freezer.h> | 73 | #include <linux/freezer.h> |
75 | #include "uwb-internal.h" | ||
76 | |||
77 | #define D_LOCAL 1 | ||
78 | #include <linux/uwb/debug.h> | ||
79 | 74 | ||
75 | #include "uwb-internal.h" | ||
80 | 76 | ||
81 | /** | 77 | /* |
82 | * UWBD Event handler function signature | 78 | * UWBD Event handler function signature |
83 | * | 79 | * |
84 | * Return !0 if the event needs not to be freed (ie the handler | 80 | * Return !0 if the event needs not to be freed (ie the handler |
@@ -101,9 +97,12 @@ struct uwbd_event { | |||
101 | const char *name; | 97 | const char *name; |
102 | }; | 98 | }; |
103 | 99 | ||
104 | /** Table of handlers for and properties of the UWBD Radio Control Events */ | 100 | /* Table of handlers for and properties of the UWBD Radio Control Events */ |
105 | static | 101 | static struct uwbd_event uwbd_urc_events[] = { |
106 | struct uwbd_event uwbd_events[] = { | 102 | [UWB_RC_EVT_IE_RCV] = { |
103 | .handler = uwbd_evt_handle_rc_ie_rcv, | ||
104 | .name = "IE_RECEIVED" | ||
105 | }, | ||
107 | [UWB_RC_EVT_BEACON] = { | 106 | [UWB_RC_EVT_BEACON] = { |
108 | .handler = uwbd_evt_handle_rc_beacon, | 107 | .handler = uwbd_evt_handle_rc_beacon, |
109 | .name = "BEACON_RECEIVED" | 108 | .name = "BEACON_RECEIVED" |
@@ -142,23 +141,15 @@ struct uwbd_evt_type_handler { | |||
142 | size_t size; | 141 | size_t size; |
143 | }; | 142 | }; |
144 | 143 | ||
145 | #define UWBD_EVT_TYPE_HANDLER(n,a) { \ | 144 | /* Table of handlers for each UWBD Event type. */ |
146 | .name = (n), \ | 145 | static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = { |
147 | .uwbd_events = (a), \ | 146 | [UWB_RC_CET_GENERAL] = { |
148 | .size = sizeof(a)/sizeof((a)[0]) \ | 147 | .name = "URC", |
149 | } | 148 | .uwbd_events = uwbd_urc_events, |
150 | 149 | .size = ARRAY_SIZE(uwbd_urc_events), | |
151 | 150 | }, | |
152 | /** Table of handlers for each UWBD Event type. */ | ||
153 | static | ||
154 | struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = { | ||
155 | [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events) | ||
156 | }; | 151 | }; |
157 | 152 | ||
158 | static const | ||
159 | size_t uwbd_evt_type_handlers_len = | ||
160 | sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]); | ||
161 | |||
162 | static const struct uwbd_event uwbd_message_handlers[] = { | 153 | static const struct uwbd_event uwbd_message_handlers[] = { |
163 | [UWB_EVT_MSG_RESET] = { | 154 | [UWB_EVT_MSG_RESET] = { |
164 | .handler = uwbd_msg_handle_reset, | 155 | .handler = uwbd_msg_handle_reset, |
@@ -166,9 +157,7 @@ static const struct uwbd_event uwbd_message_handlers[] = { | |||
166 | }, | 157 | }, |
167 | }; | 158 | }; |
168 | 159 | ||
169 | static DEFINE_MUTEX(uwbd_event_mutex); | 160 | /* |
170 | |||
171 | /** | ||
172 | * Handle an URC event passed to the UWB Daemon | 161 | * Handle an URC event passed to the UWB Daemon |
173 | * | 162 | * |
174 | * @evt: the event to handle | 163 | * @evt: the event to handle |
@@ -188,6 +177,7 @@ static DEFINE_MUTEX(uwbd_event_mutex); | |||
188 | static | 177 | static |
189 | int uwbd_event_handle_urc(struct uwb_event *evt) | 178 | int uwbd_event_handle_urc(struct uwb_event *evt) |
190 | { | 179 | { |
180 | int result = -EINVAL; | ||
191 | struct uwbd_evt_type_handler *type_table; | 181 | struct uwbd_evt_type_handler *type_table; |
192 | uwbd_evt_handler_f handler; | 182 | uwbd_evt_handler_f handler; |
193 | u8 type, context; | 183 | u8 type, context; |
@@ -197,26 +187,24 @@ int uwbd_event_handle_urc(struct uwb_event *evt) | |||
197 | event = le16_to_cpu(evt->notif.rceb->wEvent); | 187 | event = le16_to_cpu(evt->notif.rceb->wEvent); |
198 | context = evt->notif.rceb->bEventContext; | 188 | context = evt->notif.rceb->bEventContext; |
199 | 189 | ||
200 | if (type > uwbd_evt_type_handlers_len) { | 190 | if (type > ARRAY_SIZE(uwbd_urc_evt_type_handlers)) |
201 | printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type); | 191 | goto out; |
202 | return -EINVAL; | 192 | type_table = &uwbd_urc_evt_type_handlers[type]; |
203 | } | 193 | if (type_table->uwbd_events == NULL) |
204 | type_table = &uwbd_evt_type_handlers[type]; | 194 | goto out; |
205 | if (type_table->uwbd_events == NULL) { | 195 | if (event > type_table->size) |
206 | printk(KERN_ERR "UWBD: event type %u: unknown\n", type); | 196 | goto out; |
207 | return -EINVAL; | ||
208 | } | ||
209 | if (event > type_table->size) { | ||
210 | printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n", | ||
211 | type_table->name, event); | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | handler = type_table->uwbd_events[event].handler; | 197 | handler = type_table->uwbd_events[event].handler; |
215 | if (handler == NULL) { | 198 | if (handler == NULL) |
216 | printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event); | 199 | goto out; |
217 | return -EINVAL; | 200 | |
218 | } | 201 | result = (*handler)(evt); |
219 | return (*handler)(evt); | 202 | out: |
203 | if (result < 0) | ||
204 | dev_err(&evt->rc->uwb_dev.dev, | ||
205 | "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n", | ||
206 | type, event, context, result); | ||
207 | return result; | ||
220 | } | 208 | } |
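
The dispatcher now walks two designated-initializer tables (event type, then event code within the type) and reports any failure once on the way out. One detail worth noting: rejecting an index with > ARRAY_SIZE() still lets an index equal to the array size through, so a defensive sketch of the same table lookup uses >= for both bounds checks. The my_* names below are illustrative, not the driver's:

#include <linux/errno.h>
#include <linux/kernel.h>

typedef int (*my_handler_f)(void *evt);

struct my_evt_handler {
        my_handler_f handler;
        const char *name;
};

static int my_noop_handler(void *evt)
{
        return 0;
}

static const struct my_evt_handler my_handlers[] = {
        [0] = { .handler = my_noop_handler, .name = "NOOP" },
        /* slots skipped by designated initializers are zero-filled,
         * i.e. .handler == NULL, and must be checked before calling */
};

static int my_dispatch(unsigned int event, void *evt)
{
        if (event >= ARRAY_SIZE(my_handlers))   /* index must be < size */
                return -EINVAL;
        if (my_handlers[event].handler == NULL)
                return -EINVAL;
        return my_handlers[event].handler(evt);
}
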
221 | 209 | ||
222 | static void uwbd_event_handle_message(struct uwb_event *evt) | 210 | static void uwbd_event_handle_message(struct uwb_event *evt) |
@@ -231,19 +219,10 @@ static void uwbd_event_handle_message(struct uwb_event *evt) | |||
231 | return; | 219 | return; |
232 | } | 220 | } |
233 | 221 | ||
234 | /* If this is a reset event we need to drop the | ||
235 | * uwbd_event_mutex or it deadlocks when the reset handler | ||
236 | * attempts to flush the uwbd events. */ | ||
237 | if (evt->message == UWB_EVT_MSG_RESET) | ||
238 | mutex_unlock(&uwbd_event_mutex); | ||
239 | |||
240 | result = uwbd_message_handlers[evt->message].handler(evt); | 222 | result = uwbd_message_handlers[evt->message].handler(evt); |
241 | if (result < 0) | 223 | if (result < 0) |
242 | dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", | 224 | dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", |
243 | uwbd_message_handlers[evt->message].name, result); | 225 | uwbd_message_handlers[evt->message].name, result); |
244 | |||
245 | if (evt->message == UWB_EVT_MSG_RESET) | ||
246 | mutex_lock(&uwbd_event_mutex); | ||
247 | } | 226 | } |
248 | 227 | ||
249 | static void uwbd_event_handle(struct uwb_event *evt) | 228 | static void uwbd_event_handle(struct uwb_event *evt) |
@@ -271,20 +250,6 @@ static void uwbd_event_handle(struct uwb_event *evt) | |||
271 | 250 | ||
272 | __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ | 251 | __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ |
273 | } | 252 | } |
274 | /* The UWB Daemon */ | ||
275 | |||
276 | |||
277 | /** Daemon's PID: used to decide if we can queue or not */ | ||
278 | static int uwbd_pid; | ||
279 | /** Daemon's task struct for managing the kthread */ | ||
280 | static struct task_struct *uwbd_task; | ||
281 | /** Daemon's waitqueue for waiting for new events */ | ||
282 | static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq); | ||
283 | /** Daemon's list of events; we queue/dequeue here */ | ||
284 | static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list); | ||
285 | /** Daemon's list lock to protect concurent access */ | ||
286 | static DEFINE_SPINLOCK(uwbd_event_list_lock); | ||
287 | |||
288 | 253 | ||
289 | /** | 254 | /** |
290 | * UWB Daemon | 255 | * UWB Daemon |
@@ -298,65 +263,58 @@ static DEFINE_SPINLOCK(uwbd_event_list_lock); | |||
298 | * FIXME: should change so we don't have a 1HZ timer all the time, but | 263 | * FIXME: should change so we don't have a 1HZ timer all the time, but |
299 | * only if there are devices. | 264 | * only if there are devices. |
300 | */ | 265 | */ |
301 | static int uwbd(void *unused) | 266 | static int uwbd(void *param) |
302 | { | 267 | { |
268 | struct uwb_rc *rc = param; | ||
303 | unsigned long flags; | 269 | unsigned long flags; |
304 | struct list_head list = LIST_HEAD_INIT(list); | 270 | struct uwb_event *evt; |
305 | struct uwb_event *evt, *nxt; | ||
306 | int should_stop = 0; | 271 | int should_stop = 0; |
272 | |||
307 | while (1) { | 273 | while (1) { |
308 | wait_event_interruptible_timeout( | 274 | wait_event_interruptible_timeout( |
309 | uwbd_wq, | 275 | rc->uwbd.wq, |
310 | !list_empty(&uwbd_event_list) | 276 | !list_empty(&rc->uwbd.event_list) |
311 | || (should_stop = kthread_should_stop()), | 277 | || (should_stop = kthread_should_stop()), |
312 | HZ); | 278 | HZ); |
313 | if (should_stop) | 279 | if (should_stop) |
314 | break; | 280 | break; |
315 | try_to_freeze(); | 281 | try_to_freeze(); |
316 | 282 | ||
317 | mutex_lock(&uwbd_event_mutex); | 283 | spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); |
318 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | 284 | if (!list_empty(&rc->uwbd.event_list)) { |
319 | list_splice_init(&uwbd_event_list, &list); | 285 | evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node); |
320 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | ||
321 | list_for_each_entry_safe(evt, nxt, &list, list_node) { | ||
322 | list_del(&evt->list_node); | 286 | list_del(&evt->list_node); |
287 | } else | ||
288 | evt = NULL; | ||
289 | spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); | ||
290 | |||
291 | if (evt) { | ||
323 | uwbd_event_handle(evt); | 292 | uwbd_event_handle(evt); |
324 | kfree(evt); | 293 | kfree(evt); |
325 | } | 294 | } |
326 | mutex_unlock(&uwbd_event_mutex); | ||
327 | 295 | ||
328 | uwb_beca_purge(); /* Purge devices that left */ | 296 | uwb_beca_purge(rc); /* Purge devices that left */ |
329 | } | 297 | } |
330 | return 0; | 298 | return 0; |
331 | } | 299 | } |
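
The daemon is now per radio controller: one kthread sleeps on rc->uwbd.wq, takes one event at a time off rc->uwbd.event_list under rc->uwbd.event_list_lock, and handles it with the lock dropped (the real loop also wakes at least once a second to purge stale beacon-cache entries, which the sketch omits). A generic producer/consumer sketch of that shape, under hypothetical my_* names:

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_evt {
        struct list_head node;
        /* payload ... */
};

struct my_daemon {
        spinlock_t lock;
        struct list_head events;
        wait_queue_head_t wq;
};

static void my_queue(struct my_daemon *d, struct my_evt *evt)
{
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);     /* producers may run in atomic context */
        list_add_tail(&evt->node, &d->events);
        spin_unlock_irqrestore(&d->lock, flags);
        wake_up_all(&d->wq);
}

static int my_thread(void *param)
{
        struct my_daemon *d = param;

        while (!kthread_should_stop()) {
                struct my_evt *evt = NULL;
                unsigned long flags;

                wait_event_interruptible(d->wq, !list_empty(&d->events)
                                         || kthread_should_stop());

                spin_lock_irqsave(&d->lock, flags);
                if (!list_empty(&d->events)) {
                        evt = list_first_entry(&d->events, struct my_evt, node);
                        list_del(&evt->node);
                }
                spin_unlock_irqrestore(&d->lock, flags);

                if (evt) {
                        /* handle the event with the lock dropped */
                        kfree(evt);
                }
        }
        return 0;
}

Such a thread is started with kthread_run() and stopped with kthread_stop(), as the uwbd_start()/uwbd_stop() hunks below show.
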
332 | 300 | ||
333 | 301 | ||
334 | /** Start the UWB daemon */ | 302 | /** Start the UWB daemon */ |
335 | void uwbd_start(void) | 303 | void uwbd_start(struct uwb_rc *rc) |
336 | { | 304 | { |
337 | uwbd_task = kthread_run(uwbd, NULL, "uwbd"); | 305 | rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); |
338 | if (uwbd_task == NULL) | 306 | if (rc->uwbd.task == NULL) |
339 | printk(KERN_ERR "UWB: Cannot start management daemon; " | 307 | printk(KERN_ERR "UWB: Cannot start management daemon; " |
340 | "UWB won't work\n"); | 308 | "UWB won't work\n"); |
341 | else | 309 | else |
342 | uwbd_pid = uwbd_task->pid; | 310 | rc->uwbd.pid = rc->uwbd.task->pid; |
343 | } | 311 | } |
344 | 312 | ||
345 | /* Stop the UWB daemon and free any unprocessed events */ | 313 | /* Stop the UWB daemon and free any unprocessed events */ |
346 | void uwbd_stop(void) | 314 | void uwbd_stop(struct uwb_rc *rc) |
347 | { | 315 | { |
348 | unsigned long flags; | 316 | kthread_stop(rc->uwbd.task); |
349 | struct uwb_event *evt, *nxt; | 317 | uwbd_flush(rc); |
350 | kthread_stop(uwbd_task); | ||
351 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | ||
352 | uwbd_pid = 0; | ||
353 | list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { | ||
354 | if (evt->type == UWB_EVT_TYPE_NOTIF) | ||
355 | kfree(evt->notif.rceb); | ||
356 | kfree(evt); | ||
357 | } | ||
358 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | ||
359 | uwb_beca_release(); | ||
360 | } | 318 | } |
361 | 319 | ||
362 | /* | 320 | /* |
@@ -373,18 +331,20 @@ void uwbd_stop(void) | |||
373 | */ | 331 | */ |
374 | void uwbd_event_queue(struct uwb_event *evt) | 332 | void uwbd_event_queue(struct uwb_event *evt) |
375 | { | 333 | { |
334 | struct uwb_rc *rc = evt->rc; | ||
376 | unsigned long flags; | 335 | unsigned long flags; |
377 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | 336 | |
378 | if (uwbd_pid != 0) { | 337 | spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); |
379 | list_add(&evt->list_node, &uwbd_event_list); | 338 | if (rc->uwbd.pid != 0) { |
380 | wake_up_all(&uwbd_wq); | 339 | list_add(&evt->list_node, &rc->uwbd.event_list); |
340 | wake_up_all(&rc->uwbd.wq); | ||
381 | } else { | 341 | } else { |
382 | __uwb_rc_put(evt->rc); | 342 | __uwb_rc_put(evt->rc); |
383 | if (evt->type == UWB_EVT_TYPE_NOTIF) | 343 | if (evt->type == UWB_EVT_TYPE_NOTIF) |
384 | kfree(evt->notif.rceb); | 344 | kfree(evt->notif.rceb); |
385 | kfree(evt); | 345 | kfree(evt); |
386 | } | 346 | } |
387 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | 347 | spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); |
388 | return; | 348 | return; |
389 | } | 349 | } |
390 | 350 | ||
@@ -392,10 +352,8 @@ void uwbd_flush(struct uwb_rc *rc) | |||
392 | { | 352 | { |
393 | struct uwb_event *evt, *nxt; | 353 | struct uwb_event *evt, *nxt; |
394 | 354 | ||
395 | mutex_lock(&uwbd_event_mutex); | 355 | spin_lock_irq(&rc->uwbd.event_list_lock); |
396 | 356 | list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) { | |
397 | spin_lock_irq(&uwbd_event_list_lock); | ||
398 | list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { | ||
399 | if (evt->rc == rc) { | 357 | if (evt->rc == rc) { |
400 | __uwb_rc_put(rc); | 358 | __uwb_rc_put(rc); |
401 | list_del(&evt->list_node); | 359 | list_del(&evt->list_node); |
@@ -404,7 +362,5 @@ void uwbd_flush(struct uwb_rc *rc) | |||
404 | kfree(evt); | 362 | kfree(evt); |
405 | } | 363 | } |
406 | } | 364 | } |
407 | spin_unlock_irq(&uwbd_event_list_lock); | 365 | spin_unlock_irq(&rc->uwbd.event_list_lock); |
408 | |||
409 | mutex_unlock(&uwbd_event_mutex); | ||
410 | } | 366 | } |
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c index 1711deadb114..19a1dd129212 100644 --- a/drivers/uwb/whc-rc.c +++ b/drivers/uwb/whc-rc.c | |||
@@ -39,7 +39,6 @@ | |||
39 | * them to the hw and transfer the replies/notifications back to the | 39 | * them to the hw and transfer the replies/notifications back to the |
40 | * UWB stack through the UWB daemon (UWBD). | 40 | * UWB stack through the UWB daemon (UWBD). |
41 | */ | 41 | */ |
42 | #include <linux/version.h> | ||
43 | #include <linux/init.h> | 42 | #include <linux/init.h> |
44 | #include <linux/module.h> | 43 | #include <linux/module.h> |
45 | #include <linux/pci.h> | 44 | #include <linux/pci.h> |
@@ -49,10 +48,8 @@ | |||
49 | #include <linux/uwb.h> | 48 | #include <linux/uwb.h> |
50 | #include <linux/uwb/whci.h> | 49 | #include <linux/uwb/whci.h> |
51 | #include <linux/uwb/umc.h> | 50 | #include <linux/uwb/umc.h> |
52 | #include "uwb-internal.h" | ||
53 | 51 | ||
54 | #define D_LOCAL 0 | 52 | #include "uwb-internal.h" |
55 | #include <linux/uwb/debug.h> | ||
56 | 53 | ||
57 | /** | 54 | /** |
58 | * Descriptor for an instance of the UWB Radio Control Driver that | 55 | * Descriptor for an instance of the UWB Radio Control Driver that |
@@ -98,13 +95,8 @@ static int whcrc_cmd(struct uwb_rc *uwb_rc, | |||
98 | struct device *dev = &whcrc->umc_dev->dev; | 95 | struct device *dev = &whcrc->umc_dev->dev; |
99 | u32 urccmd; | 96 | u32 urccmd; |
100 | 97 | ||
101 | d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size); | 98 | if (cmd_size >= 4096) |
102 | might_sleep(); | 99 | return -EINVAL; |
103 | |||
104 | if (cmd_size >= 4096) { | ||
105 | result = -E2BIG; | ||
106 | goto error; | ||
107 | } | ||
108 | 100 | ||
109 | /* | 101 | /* |
110 | * If the URC is halted, then the hardware has reset itself. | 102 | * If the URC is halted, then the hardware has reset itself. |
@@ -115,16 +107,14 @@ static int whcrc_cmd(struct uwb_rc *uwb_rc, | |||
115 | if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { | 107 | if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { |
116 | dev_err(dev, "requesting reset of halted radio controller\n"); | 108 | dev_err(dev, "requesting reset of halted radio controller\n"); |
117 | uwb_rc_reset_all(uwb_rc); | 109 | uwb_rc_reset_all(uwb_rc); |
118 | result = -EIO; | 110 | return -EIO; |
119 | goto error; | ||
120 | } | 111 | } |
121 | 112 | ||
122 | result = wait_event_timeout(whcrc->cmd_wq, | 113 | result = wait_event_timeout(whcrc->cmd_wq, |
123 | !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); | 114 | !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); |
124 | if (result == 0) { | 115 | if (result == 0) { |
125 | dev_err(dev, "device is not ready to execute commands\n"); | 116 | dev_err(dev, "device is not ready to execute commands\n"); |
126 | result = -ETIMEDOUT; | 117 | return -ETIMEDOUT; |
127 | goto error; | ||
128 | } | 118 | } |
129 | 119 | ||
130 | memmove(whcrc->cmd_buf, cmd, cmd_size); | 120 | memmove(whcrc->cmd_buf, cmd, cmd_size); |
@@ -137,10 +127,7 @@ static int whcrc_cmd(struct uwb_rc *uwb_rc, | |||
137 | whcrc->rc_base + URCCMD); | 127 | whcrc->rc_base + URCCMD); |
138 | spin_unlock(&whcrc->irq_lock); | 128 | spin_unlock(&whcrc->irq_lock); |
139 | 129 | ||
140 | error: | 130 | return 0; |
141 | d_fnend(3, dev, "(%p, %p, %zu) = %d\n", | ||
142 | uwb_rc, cmd, cmd_size, result); | ||
143 | return result; | ||
144 | } | 131 | } |
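
whcrc_cmd() now returns early instead of funnelling through one error label: oversized commands are rejected, a halted controller triggers uwb_rc_reset_all(), the previous command's ACTIVE bit is waited for with a timeout, and only then is the command copied and the doorbell written under the IRQ lock. The wait-until-idle-then-kick part in isolation, with made-up MY_* register names and a hypothetical my_hw context:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#define MY_CMD_REG      0x00            /* made-up register layout */
#define MY_CMD_ACTIVE   (1 << 31)

struct my_hw {
        void __iomem *base;
        spinlock_t irq_lock;
        wait_queue_head_t cmd_wq;       /* woken from the IRQ handler */
};

static bool my_cmd_idle(struct my_hw *hw)
{
        return !(readl(hw->base + MY_CMD_REG) & MY_CMD_ACTIVE);
}

static int my_submit(struct my_hw *hw, u32 doorbell)
{
        /* give the previous command up to half a second to complete */
        if (wait_event_timeout(hw->cmd_wq, my_cmd_idle(hw), HZ / 2) == 0)
                return -ETIMEDOUT;

        spin_lock(&hw->irq_lock);
        writel(doorbell | MY_CMD_ACTIVE, hw->base + MY_CMD_REG);
        spin_unlock(&hw->irq_lock);
        return 0;
}
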
145 | 132 | ||
146 | static int whcrc_reset(struct uwb_rc *rc) | 133 | static int whcrc_reset(struct uwb_rc *rc) |
@@ -167,34 +154,25 @@ static int whcrc_reset(struct uwb_rc *rc) | |||
167 | static | 154 | static |
168 | void whcrc_enable_events(struct whcrc *whcrc) | 155 | void whcrc_enable_events(struct whcrc *whcrc) |
169 | { | 156 | { |
170 | struct device *dev = &whcrc->umc_dev->dev; | ||
171 | u32 urccmd; | 157 | u32 urccmd; |
172 | 158 | ||
173 | d_fnstart(4, dev, "(whcrc %p)\n", whcrc); | ||
174 | |||
175 | le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); | 159 | le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); |
176 | 160 | ||
177 | spin_lock(&whcrc->irq_lock); | 161 | spin_lock(&whcrc->irq_lock); |
178 | urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; | 162 | urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; |
179 | le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); | 163 | le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); |
180 | spin_unlock(&whcrc->irq_lock); | 164 | spin_unlock(&whcrc->irq_lock); |
181 | |||
182 | d_fnend(4, dev, "(whcrc %p) = void\n", whcrc); | ||
183 | } | 165 | } |
184 | 166 | ||
185 | static void whcrc_event_work(struct work_struct *work) | 167 | static void whcrc_event_work(struct work_struct *work) |
186 | { | 168 | { |
187 | struct whcrc *whcrc = container_of(work, struct whcrc, event_work); | 169 | struct whcrc *whcrc = container_of(work, struct whcrc, event_work); |
188 | struct device *dev = &whcrc->umc_dev->dev; | ||
189 | size_t size; | 170 | size_t size; |
190 | u64 urcevtaddr; | 171 | u64 urcevtaddr; |
191 | 172 | ||
192 | urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); | 173 | urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); |
193 | size = urcevtaddr & URCEVTADDR_OFFSET_MASK; | 174 | size = urcevtaddr & URCEVTADDR_OFFSET_MASK; |
194 | 175 | ||
195 | d_printf(3, dev, "received %zu octet event\n", size); | ||
196 | d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size); | ||
197 | |||
198 | uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); | 176 | uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); |
199 | whcrc_enable_events(whcrc); | 177 | whcrc_enable_events(whcrc); |
200 | } | 178 | } |
@@ -217,22 +195,15 @@ irqreturn_t whcrc_irq_cb(int irq, void *_whcrc) | |||
217 | return IRQ_NONE; | 195 | return IRQ_NONE; |
218 | le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); | 196 | le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); |
219 | 197 | ||
220 | d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n", | ||
221 | le_readl(whcrc->rc_base + URCSTS), urcsts); | ||
222 | |||
223 | if (urcsts & URCSTS_HSE) { | 198 | if (urcsts & URCSTS_HSE) { |
224 | dev_err(dev, "host system error -- hardware halted\n"); | 199 | dev_err(dev, "host system error -- hardware halted\n"); |
225 | /* FIXME: do something sensible here */ | 200 | /* FIXME: do something sensible here */ |
226 | goto out; | 201 | goto out; |
227 | } | 202 | } |
228 | if (urcsts & URCSTS_ER) { | 203 | if (urcsts & URCSTS_ER) |
229 | d_printf(3, dev, "ER: event ready\n"); | ||
230 | schedule_work(&whcrc->event_work); | 204 | schedule_work(&whcrc->event_work); |
231 | } | 205 | if (urcsts & URCSTS_RCI) |
232 | if (urcsts & URCSTS_RCI) { | ||
233 | d_printf(3, dev, "RCI: ready to execute another command\n"); | ||
234 | wake_up_all(&whcrc->cmd_wq); | 206 | wake_up_all(&whcrc->cmd_wq); |
235 | } | ||
236 | out: | 207 | out: |
237 | return IRQ_HANDLED; | 208 | return IRQ_HANDLED; |
238 | } | 209 | } |
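
Stripped of the d_printf() tracing, the interrupt handler acknowledges only the status bits it saw, defers event processing to a work item, and wakes any command submitter when the controller signals it is ready again. The same shape, with made-up MY_* registers and a hypothetical my_irq_ctx:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MY_STS_REG      0x04            /* made-up register layout */
#define MY_STS_EVENT    (1 << 0)
#define MY_STS_READY    (1 << 1)
#define MY_STS_INT_MASK (MY_STS_EVENT | MY_STS_READY)

struct my_irq_ctx {
        void __iomem *base;
        struct work_struct event_work;  /* does the slow processing */
        wait_queue_head_t cmd_wq;
};

static irqreturn_t my_irq(int irq, void *arg)
{
        struct my_irq_ctx *ctx = arg;
        u32 sts = readl(ctx->base + MY_STS_REG);

        if (!(sts & MY_STS_INT_MASK))
                return IRQ_NONE;                        /* shared line, not ours */
        writel(sts & MY_STS_INT_MASK, ctx->base + MY_STS_REG); /* ack what we saw */

        if (sts & MY_STS_EVENT)
                schedule_work(&ctx->event_work);        /* heavy lifting in process context */
        if (sts & MY_STS_READY)
                wake_up_all(&ctx->cmd_wq);
        return IRQ_HANDLED;
}
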
@@ -251,8 +222,7 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc) | |||
251 | whcrc->area = umc_dev->resource.start; | 222 | whcrc->area = umc_dev->resource.start; |
252 | whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; | 223 | whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; |
253 | result = -EBUSY; | 224 | result = -EBUSY; |
254 | if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) | 225 | if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) { |
255 | == NULL) { | ||
256 | dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", | 226 | dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", |
257 | whcrc->rc_len, whcrc->area, result); | 227 | whcrc->rc_len, whcrc->area, result); |
258 | goto error_request_region; | 228 | goto error_request_region; |
@@ -287,8 +257,6 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc) | |||
287 | dev_err(dev, "Can't allocate evt transfer buffer\n"); | 257 | dev_err(dev, "Can't allocate evt transfer buffer\n"); |
288 | goto error_evt_buffer; | 258 | goto error_evt_buffer; |
289 | } | 259 | } |
290 | d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n", | ||
291 | whcrc->rc_len, whcrc->rc_base, umc_dev->irq); | ||
292 | return 0; | 260 | return 0; |
293 | 261 | ||
294 | error_evt_buffer: | 262 | error_evt_buffer: |
@@ -333,47 +301,23 @@ void whcrc_release_rc_umc(struct whcrc *whcrc) | |||
333 | static int whcrc_start_rc(struct uwb_rc *rc) | 301 | static int whcrc_start_rc(struct uwb_rc *rc) |
334 | { | 302 | { |
335 | struct whcrc *whcrc = rc->priv; | 303 | struct whcrc *whcrc = rc->priv; |
336 | int result = 0; | ||
337 | struct device *dev = &whcrc->umc_dev->dev; | 304 | struct device *dev = &whcrc->umc_dev->dev; |
338 | unsigned long start, duration; | ||
339 | 305 | ||
340 | /* Reset the thing */ | 306 | /* Reset the thing */ |
341 | le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); | 307 | le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); |
342 | if (d_test(3)) | ||
343 | start = jiffies; | ||
344 | if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, | 308 | if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, |
345 | 5000, "device to reset at init") < 0) { | 309 | 5000, "hardware reset") < 0) |
346 | result = -EBUSY; | 310 | return -EBUSY; |
347 | goto error; | ||
348 | } else if (d_test(3)) { | ||
349 | duration = jiffies - start; | ||
350 | if (duration > msecs_to_jiffies(40)) | ||
351 | dev_err(dev, "Device took %ums to " | ||
352 | "reset. MAX expected: 40ms\n", | ||
353 | jiffies_to_msecs(duration)); | ||
354 | } | ||
355 | 311 | ||
356 | /* Set the event buffer, start the controller (enable IRQs later) */ | 312 | /* Set the event buffer, start the controller (enable IRQs later) */ |
357 | le_writel(0, whcrc->rc_base + URCINTR); | 313 | le_writel(0, whcrc->rc_base + URCINTR); |
358 | le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); | 314 | le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); |
359 | result = -ETIMEDOUT; | ||
360 | if (d_test(3)) | ||
361 | start = jiffies; | ||
362 | if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, | 315 | if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, |
363 | 5000, "device to start") < 0) | 316 | 5000, "radio controller start") < 0) |
364 | goto error; | 317 | return -ETIMEDOUT; |
365 | if (d_test(3)) { | ||
366 | duration = jiffies - start; | ||
367 | if (duration > msecs_to_jiffies(40)) | ||
368 | dev_err(dev, "Device took %ums to start. " | ||
369 | "MAX expected: 40ms\n", | ||
370 | jiffies_to_msecs(duration)); | ||
371 | } | ||
372 | whcrc_enable_events(whcrc); | 318 | whcrc_enable_events(whcrc); |
373 | result = 0; | ||
374 | le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); | 319 | le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); |
375 | error: | 320 | return 0; |
376 | return result; | ||
377 | } | 321 | } |
378 | 322 | ||
379 | 323 | ||
@@ -395,7 +339,7 @@ void whcrc_stop_rc(struct uwb_rc *rc) | |||
395 | 339 | ||
396 | le_writel(0, whcrc->rc_base + URCCMD); | 340 | le_writel(0, whcrc->rc_base + URCCMD); |
397 | whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, | 341 | whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, |
398 | URCSTS_HALTED, 0, 40, "URCSTS.HALTED"); | 342 | URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop"); |
399 | } | 343 | } |
400 | 344 | ||
401 | static void whcrc_init(struct whcrc *whcrc) | 345 | static void whcrc_init(struct whcrc *whcrc) |
@@ -421,7 +365,6 @@ int whcrc_probe(struct umc_dev *umc_dev) | |||
421 | struct whcrc *whcrc; | 365 | struct whcrc *whcrc; |
422 | struct device *dev = &umc_dev->dev; | 366 | struct device *dev = &umc_dev->dev; |
423 | 367 | ||
424 | d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev); | ||
425 | result = -ENOMEM; | 368 | result = -ENOMEM; |
426 | uwb_rc = uwb_rc_alloc(); | 369 | uwb_rc = uwb_rc_alloc(); |
427 | if (uwb_rc == NULL) { | 370 | if (uwb_rc == NULL) { |
@@ -453,7 +396,6 @@ int whcrc_probe(struct umc_dev *umc_dev) | |||
453 | if (result < 0) | 396 | if (result < 0) |
454 | goto error_rc_add; | 397 | goto error_rc_add; |
455 | umc_set_drvdata(umc_dev, whcrc); | 398 | umc_set_drvdata(umc_dev, whcrc); |
456 | d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev); | ||
457 | return 0; | 399 | return 0; |
458 | 400 | ||
459 | error_rc_add: | 401 | error_rc_add: |
@@ -463,7 +405,6 @@ error_setup_rc_umc: | |||
463 | error_alloc: | 405 | error_alloc: |
464 | uwb_rc_put(uwb_rc); | 406 | uwb_rc_put(uwb_rc); |
465 | error_rc_alloc: | 407 | error_rc_alloc: |
466 | d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result); | ||
467 | return result; | 408 | return result; |
468 | } | 409 | } |
469 | 410 | ||
@@ -486,7 +427,24 @@ static void whcrc_remove(struct umc_dev *umc_dev) | |||
486 | whcrc_release_rc_umc(whcrc); | 427 | whcrc_release_rc_umc(whcrc); |
487 | kfree(whcrc); | 428 | kfree(whcrc); |
488 | uwb_rc_put(uwb_rc); | 429 | uwb_rc_put(uwb_rc); |
489 | d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc); | 430 | } |
431 | |||
432 | static int whcrc_pre_reset(struct umc_dev *umc) | ||
433 | { | ||
434 | struct whcrc *whcrc = umc_get_drvdata(umc); | ||
435 | struct uwb_rc *uwb_rc = whcrc->uwb_rc; | ||
436 | |||
437 | uwb_rc_pre_reset(uwb_rc); | ||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | static int whcrc_post_reset(struct umc_dev *umc) | ||
442 | { | ||
443 | struct whcrc *whcrc = umc_get_drvdata(umc); | ||
444 | struct uwb_rc *uwb_rc = whcrc->uwb_rc; | ||
445 | |||
446 | uwb_rc_post_reset(uwb_rc); | ||
447 | return 0; | ||
490 | } | 448 | } |
491 | 449 | ||
492 | /* PCI device ID's that we handle [so it gets loaded] */ | 450 | /* PCI device ID's that we handle [so it gets loaded] */ |
@@ -497,10 +455,12 @@ static struct pci_device_id whcrc_id_table[] = { | |||
497 | MODULE_DEVICE_TABLE(pci, whcrc_id_table); | 455 | MODULE_DEVICE_TABLE(pci, whcrc_id_table); |
498 | 456 | ||
499 | static struct umc_driver whcrc_driver = { | 457 | static struct umc_driver whcrc_driver = { |
500 | .name = "whc-rc", | 458 | .name = "whc-rc", |
501 | .cap_id = UMC_CAP_ID_WHCI_RC, | 459 | .cap_id = UMC_CAP_ID_WHCI_RC, |
502 | .probe = whcrc_probe, | 460 | .probe = whcrc_probe, |
503 | .remove = whcrc_remove, | 461 | .remove = whcrc_remove, |
462 | .pre_reset = whcrc_pre_reset, | ||
463 | .post_reset = whcrc_post_reset, | ||
504 | }; | 464 | }; |
505 | 465 | ||
506 | static int __init whcrc_driver_init(void) | 466 | static int __init whcrc_driver_init(void) |
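The two callbacks added above hook the WHCI radio controller into the UMC bus reset path: .pre_reset is expected to run before the bus resets the device and .post_reset afterwards, and the driver simply forwards both to the UWB core. A minimal sketch of the same wiring for a hypothetical UMC driver follows; the foo_* names are illustrative, and only umc_get_drvdata() and uwb_rc_pre_reset()/uwb_rc_post_reset() are taken from this patch.

struct foo_rc {
        struct uwb_rc *uwb_rc;          /* core radio controller object */
        /* ... device state ... */
};

static int foo_pre_reset(struct umc_dev *umc)
{
        struct foo_rc *frc = umc_get_drvdata(umc);

        uwb_rc_pre_reset(frc->uwb_rc);  /* quiesce the UWB stack */
        return 0;
}

static int foo_post_reset(struct umc_dev *umc)
{
        struct foo_rc *frc = umc_get_drvdata(umc);

        uwb_rc_post_reset(frc->uwb_rc); /* bring the stack back up */
        return 0;
}

static struct umc_driver foo_driver = {
        .name       = "foo-rc",
        .probe      = foo_probe,        /* hypothetical probe/remove */
        .remove     = foo_remove,
        .pre_reset  = foo_pre_reset,
        .post_reset = foo_post_reset,
};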
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c index 3df2388f908f..1f8964ed9882 100644 --- a/drivers/uwb/whci.c +++ b/drivers/uwb/whci.c | |||
@@ -67,11 +67,11 @@ int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result, | |||
67 | val = le_readl(reg); | 67 | val = le_readl(reg); |
68 | if ((val & mask) == result) | 68 | if ((val & mask) == result) |
69 | break; | 69 | break; |
70 | msleep(10); | ||
71 | if (t >= max_ms) { | 70 | if (t >= max_ms) { |
72 | dev_err(dev, "timed out waiting for %s ", tag); | 71 | dev_err(dev, "%s timed out\n", tag); |
73 | return -ETIMEDOUT; | 72 | return -ETIMEDOUT; |
74 | } | 73 | } |
74 | msleep(10); | ||
75 | t += 10; | 75 | t += 10; |
76 | } | 76 | } |
77 | return 0; | 77 | return 0; |
@@ -111,7 +111,7 @@ static int whci_add_cap(struct whci_card *card, int n) | |||
111 | + UWBCAPDATA_TO_OFFSET(capdata); | 111 | + UWBCAPDATA_TO_OFFSET(capdata); |
112 | umc->resource.end = umc->resource.start | 112 | umc->resource.end = umc->resource.start |
113 | + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; | 113 | + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; |
114 | umc->resource.name = umc->dev.bus_id; | 114 | umc->resource.name = dev_name(&umc->dev); |
115 | umc->resource.flags = card->pci->resource[bar].flags; | 115 | umc->resource.flags = card->pci->resource[bar].flags; |
116 | umc->resource.parent = &card->pci->resource[bar]; | 116 | umc->resource.parent = &card->pci->resource[bar]; |
117 | umc->irq = card->pci->irq; | 117 | umc->irq = card->pci->irq; |
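Two small fixes land in whci.c: the register poll in whci_wait_for() now checks the deadline before sleeping, so a loop that has already timed out does not sleep another 10 ms first, and the resource name comes from dev_name() instead of dereferencing the bus_id field directly. The overall shape of that poll-with-timeout helper, reconstructed here as a sketch (the parameter list mirrors the one visible in the hunk; exact types and surrounding includes are assumed):

/* Sketch: poll *reg until (*reg & mask) == result, or give up after
 * max_ms milliseconds.  The deadline is tested before msleep() so the
 * final iteration fails fast instead of sleeping one extra period. */
static int poll_reg(struct device *dev, u32 __iomem *reg, u32 mask,
                    u32 result, unsigned long max_ms, const char *tag)
{
        unsigned long t = 0;
        u32 val;

        for (;;) {
                val = le_readl(reg);            /* LE register read helper */
                if ((val & mask) == result)
                        return 0;
                if (t >= max_ms) {
                        dev_err(dev, "%s timed out\n", tag);
                        return -ETIMEDOUT;
                }
                msleep(10);
                t += 10;
        }
}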
diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c index 10985fa233cc..69e020039718 100644 --- a/drivers/uwb/wlp/eda.c +++ b/drivers/uwb/wlp/eda.c | |||
@@ -51,9 +51,7 @@ | |||
51 | * the tag and address of the transmitting neighbor. | 51 | * the tag and address of the transmitting neighbor. |
52 | */ | 52 | */ |
53 | 53 | ||
54 | #define D_LOCAL 5 | ||
55 | #include <linux/netdevice.h> | 54 | #include <linux/netdevice.h> |
56 | #include <linux/uwb/debug.h> | ||
57 | #include <linux/etherdevice.h> | 55 | #include <linux/etherdevice.h> |
58 | #include <linux/wlp.h> | 56 | #include <linux/wlp.h> |
59 | #include "wlp-internal.h" | 57 | #include "wlp-internal.h" |
@@ -304,7 +302,6 @@ int wlp_eda_for_virtual(struct wlp_eda *eda, | |||
304 | { | 302 | { |
305 | int result = 0; | 303 | int result = 0; |
306 | struct wlp *wlp = container_of(eda, struct wlp, eda); | 304 | struct wlp *wlp = container_of(eda, struct wlp, eda); |
307 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
308 | struct wlp_eda_node *itr; | 305 | struct wlp_eda_node *itr; |
309 | unsigned long flags; | 306 | unsigned long flags; |
310 | int found = 0; | 307 | int found = 0; |
@@ -313,26 +310,14 @@ int wlp_eda_for_virtual(struct wlp_eda *eda, | |||
313 | list_for_each_entry(itr, &eda->cache, list_node) { | 310 | list_for_each_entry(itr, &eda->cache, list_node) { |
314 | if (!memcmp(itr->virt_addr, virt_addr, | 311 | if (!memcmp(itr->virt_addr, virt_addr, |
315 | sizeof(itr->virt_addr))) { | 312 | sizeof(itr->virt_addr))) { |
316 | d_printf(6, dev, "EDA: looking for %pM hit %02x:%02x " | ||
317 | "wss %p tag 0x%02x state %u\n", | ||
318 | virt_addr, | ||
319 | itr->dev_addr.data[1], | ||
320 | itr->dev_addr.data[0], itr->wss, | ||
321 | itr->tag, itr->state); | ||
322 | result = (*function)(wlp, itr, priv); | 313 | result = (*function)(wlp, itr, priv); |
323 | *dev_addr = itr->dev_addr; | 314 | *dev_addr = itr->dev_addr; |
324 | found = 1; | 315 | found = 1; |
325 | break; | 316 | break; |
326 | } else | 317 | } |
327 | d_printf(6, dev, "EDA: looking for %pM against %pM miss\n", | ||
328 | virt_addr, itr->virt_addr); | ||
329 | } | 318 | } |
330 | if (!found) { | 319 | if (!found) |
331 | if (printk_ratelimit()) | ||
332 | dev_err(dev, "EDA: Eth addr %pM not found.\n", | ||
333 | virt_addr); | ||
334 | result = -ENODEV; | 320 | result = -ENODEV; |
335 | } | ||
336 | spin_unlock_irqrestore(&eda->lock, flags); | 321 | spin_unlock_irqrestore(&eda->lock, flags); |
337 | return result; | 322 | return result; |
338 | } | 323 | } |
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c index a64cb8241713..aa42fcee4c4f 100644 --- a/drivers/uwb/wlp/messages.c +++ b/drivers/uwb/wlp/messages.c | |||
@@ -24,8 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/wlp.h> | 26 | #include <linux/wlp.h> |
27 | #define D_LOCAL 6 | 27 | |
28 | #include <linux/uwb/debug.h> | ||
29 | #include "wlp-internal.h" | 28 | #include "wlp-internal.h" |
30 | 29 | ||
31 | static | 30 | static |
@@ -105,24 +104,18 @@ static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type, | |||
105 | #define wlp_set(type, type_code, name) \ | 104 | #define wlp_set(type, type_code, name) \ |
106 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ | 105 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ |
107 | { \ | 106 | { \ |
108 | d_fnstart(6, NULL, "(attribute %p)\n", attr); \ | ||
109 | wlp_set_attr_hdr(&attr->hdr, type_code, \ | 107 | wlp_set_attr_hdr(&attr->hdr, type_code, \ |
110 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ | 108 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ |
111 | attr->name = value; \ | 109 | attr->name = value; \ |
112 | d_dump(6, NULL, attr, sizeof(*attr)); \ | ||
113 | d_fnend(6, NULL, "(attribute %p)\n", attr); \ | ||
114 | return sizeof(*attr); \ | 110 | return sizeof(*attr); \ |
115 | } | 111 | } |
116 | 112 | ||
117 | #define wlp_pset(type, type_code, name) \ | 113 | #define wlp_pset(type, type_code, name) \ |
118 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ | 114 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ |
119 | { \ | 115 | { \ |
120 | d_fnstart(6, NULL, "(attribute %p)\n", attr); \ | ||
121 | wlp_set_attr_hdr(&attr->hdr, type_code, \ | 116 | wlp_set_attr_hdr(&attr->hdr, type_code, \ |
122 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ | 117 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ |
123 | attr->name = *value; \ | 118 | attr->name = *value; \ |
124 | d_dump(6, NULL, attr, sizeof(*attr)); \ | ||
125 | d_fnend(6, NULL, "(attribute %p)\n", attr); \ | ||
126 | return sizeof(*attr); \ | 119 | return sizeof(*attr); \ |
127 | } | 120 | } |
128 | 121 | ||
@@ -139,11 +132,8 @@ static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ | |||
139 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ | 132 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ |
140 | size_t len) \ | 133 | size_t len) \ |
141 | { \ | 134 | { \ |
142 | d_fnstart(6, NULL, "(attribute %p)\n", attr); \ | ||
143 | wlp_set_attr_hdr(&attr->hdr, type_code, len); \ | 135 | wlp_set_attr_hdr(&attr->hdr, type_code, len); \ |
144 | memcpy(attr->name, value, len); \ | 136 | memcpy(attr->name, value, len); \ |
145 | d_dump(6, NULL, attr, sizeof(*attr) + len); \ | ||
146 | d_fnend(6, NULL, "(attribute %p)\n", attr); \ | ||
147 | return sizeof(*attr) + len; \ | 137 | return sizeof(*attr) + len; \ |
148 | } | 138 | } |
149 | 139 | ||
@@ -182,7 +172,7 @@ static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr, | |||
182 | size_t datalen; | 172 | size_t datalen; |
183 | void *ptr = attr->wss_info; | 173 | void *ptr = attr->wss_info; |
184 | size_t used = sizeof(*attr); | 174 | size_t used = sizeof(*attr); |
185 | d_fnstart(6, NULL, "(attribute %p)\n", attr); | 175 | |
186 | datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); | 176 | datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); |
187 | wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); | 177 | wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); |
188 | used = wlp_set_wssid(ptr, &wss->wssid); | 178 | used = wlp_set_wssid(ptr, &wss->wssid); |
@@ -190,9 +180,6 @@ static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr, | |||
190 | used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); | 180 | used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); |
191 | used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); | 181 | used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); |
192 | used += wlp_set_wss_bcast(ptr + used, &wss->bcast); | 182 | used += wlp_set_wss_bcast(ptr + used, &wss->bcast); |
193 | d_dump(6, NULL, attr, sizeof(*attr) + datalen); | ||
194 | d_fnend(6, NULL, "(attribute %p, used %d)\n", | ||
195 | attr, (int)(sizeof(*attr) + used)); | ||
196 | return sizeof(*attr) + used; | 183 | return sizeof(*attr) + used; |
197 | } | 184 | } |
198 | 185 | ||
@@ -414,7 +401,6 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
414 | size_t used = 0; | 401 | size_t used = 0; |
415 | ssize_t result = -EINVAL; | 402 | ssize_t result = -EINVAL; |
416 | 403 | ||
417 | d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n"); | ||
418 | result = wlp_get_wss_name(wlp, ptr, info->name, buflen); | 404 | result = wlp_get_wss_name(wlp, ptr, info->name, buflen); |
419 | if (result < 0) { | 405 | if (result < 0) { |
420 | dev_err(dev, "WLP: unable to obtain WSS name from " | 406 | dev_err(dev, "WLP: unable to obtain WSS name from " |
@@ -422,7 +408,7 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
422 | goto error_parse; | 408 | goto error_parse; |
423 | } | 409 | } |
424 | used += result; | 410 | used += result; |
425 | d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n"); | 411 | |
426 | result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, | 412 | result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, |
427 | buflen - used); | 413 | buflen - used); |
428 | if (result < 0) { | 414 | if (result < 0) { |
@@ -437,7 +423,7 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
437 | goto error_parse; | 423 | goto error_parse; |
438 | } | 424 | } |
439 | used += result; | 425 | used += result; |
440 | d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n"); | 426 | |
441 | result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, | 427 | result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, |
442 | buflen - used); | 428 | buflen - used); |
443 | if (result < 0) { | 429 | if (result < 0) { |
@@ -452,7 +438,7 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
452 | goto error_parse; | 438 | goto error_parse; |
453 | } | 439 | } |
454 | used += result; | 440 | used += result; |
455 | d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n"); | 441 | |
456 | result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, | 442 | result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, |
457 | buflen - used); | 443 | buflen - used); |
458 | if (result < 0) { | 444 | if (result < 0) { |
@@ -530,7 +516,7 @@ static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr, | |||
530 | len = result; | 516 | len = result; |
531 | used = sizeof(*attr); | 517 | used = sizeof(*attr); |
532 | ptr = attr; | 518 | ptr = attr; |
533 | d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n"); | 519 | |
534 | result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); | 520 | result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); |
535 | if (result < 0) { | 521 | if (result < 0) { |
536 | dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); | 522 | dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); |
@@ -553,8 +539,6 @@ static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr, | |||
553 | goto out; | 539 | goto out; |
554 | } | 540 | } |
555 | result = used; | 541 | result = used; |
556 | d_printf(6, dev, "WLP: Successfully parsed WLP information " | ||
557 | "attribute. used %zu bytes\n", used); | ||
558 | out: | 542 | out: |
559 | return result; | 543 | return result; |
560 | } | 544 | } |
@@ -598,8 +582,6 @@ static ssize_t wlp_get_all_wss_info(struct wlp *wlp, | |||
598 | struct wlp_wssid_e *wssid_e; | 582 | struct wlp_wssid_e *wssid_e; |
599 | char buf[WLP_WSS_UUID_STRSIZE]; | 583 | char buf[WLP_WSS_UUID_STRSIZE]; |
600 | 584 | ||
601 | d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n", | ||
602 | wlp, attr, neighbor, wss, (int)buflen); | ||
603 | if (buflen < 0) | 585 | if (buflen < 0) |
604 | goto out; | 586 | goto out; |
605 | 587 | ||
@@ -638,8 +620,7 @@ static ssize_t wlp_get_all_wss_info(struct wlp *wlp, | |||
638 | wss->accept_enroll = wss_info.accept_enroll; | 620 | wss->accept_enroll = wss_info.accept_enroll; |
639 | wss->state = WLP_WSS_STATE_PART_ENROLLED; | 621 | wss->state = WLP_WSS_STATE_PART_ENROLLED; |
640 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); | 622 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); |
641 | d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n", | 623 | dev_dbg(dev, "WLP: Found WSS %s. Enrolling.\n", buf); |
642 | buf); | ||
643 | } else { | 624 | } else { |
644 | wssid_e = wlp_create_wssid_e(wlp, neighbor); | 625 | wssid_e = wlp_create_wssid_e(wlp, neighbor); |
645 | if (wssid_e == NULL) { | 626 | if (wssid_e == NULL) { |
@@ -660,9 +641,6 @@ error_parse: | |||
660 | if (result < 0 && !enroll) /* this was a discovery */ | 641 | if (result < 0 && !enroll) /* this was a discovery */ |
661 | wlp_remove_neighbor_tmp_info(neighbor); | 642 | wlp_remove_neighbor_tmp_info(neighbor); |
662 | out: | 643 | out: |
663 | d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, " | ||
664 | "result %d \n", wlp, attr, neighbor, wss, (int)buflen, | ||
665 | (int)result); | ||
666 | return result; | 644 | return result; |
667 | 645 | ||
668 | } | 646 | } |
@@ -718,7 +696,6 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
718 | struct sk_buff *_skb; | 696 | struct sk_buff *_skb; |
719 | void *d1_itr; | 697 | void *d1_itr; |
720 | 698 | ||
721 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
722 | if (wlp->dev_info == NULL) { | 699 | if (wlp->dev_info == NULL) { |
723 | result = __wlp_setup_device_info(wlp); | 700 | result = __wlp_setup_device_info(wlp); |
724 | if (result < 0) { | 701 | if (result < 0) { |
@@ -728,24 +705,6 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
728 | } | 705 | } |
729 | } | 706 | } |
730 | info = wlp->dev_info; | 707 | info = wlp->dev_info; |
731 | d_printf(6, dev, "Local properties:\n" | ||
732 | "Device name (%d bytes): %s\n" | ||
733 | "Model name (%d bytes): %s\n" | ||
734 | "Manufacturer (%d bytes): %s\n" | ||
735 | "Model number (%d bytes): %s\n" | ||
736 | "Serial number (%d bytes): %s\n" | ||
737 | "Primary device type: \n" | ||
738 | " Category: %d \n" | ||
739 | " OUI: %02x:%02x:%02x \n" | ||
740 | " OUI Subdivision: %u \n", | ||
741 | (int)strlen(info->name), info->name, | ||
742 | (int)strlen(info->model_name), info->model_name, | ||
743 | (int)strlen(info->manufacturer), info->manufacturer, | ||
744 | (int)strlen(info->model_nr), info->model_nr, | ||
745 | (int)strlen(info->serial), info->serial, | ||
746 | info->prim_dev_type.category, | ||
747 | info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], | ||
748 | info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); | ||
749 | _skb = dev_alloc_skb(sizeof(*_d1) | 708 | _skb = dev_alloc_skb(sizeof(*_d1) |
750 | + sizeof(struct wlp_attr_uuid_e) | 709 | + sizeof(struct wlp_attr_uuid_e) |
751 | + sizeof(struct wlp_attr_wss_sel_mthd) | 710 | + sizeof(struct wlp_attr_wss_sel_mthd) |
@@ -768,7 +727,6 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
768 | goto error; | 727 | goto error; |
769 | } | 728 | } |
770 | _d1 = (void *) _skb->data; | 729 | _d1 = (void *) _skb->data; |
771 | d_printf(6, dev, "D1 starts at %p \n", _d1); | ||
772 | _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 730 | _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
773 | _d1->hdr.type = WLP_FRAME_ASSOCIATION; | 731 | _d1->hdr.type = WLP_FRAME_ASSOCIATION; |
774 | _d1->type = WLP_ASSOC_D1; | 732 | _d1->type = WLP_ASSOC_D1; |
@@ -791,25 +749,8 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
791 | used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); | 749 | used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); |
792 | used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); | 750 | used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); |
793 | skb_put(_skb, sizeof(*_d1) + used); | 751 | skb_put(_skb, sizeof(*_d1) + used); |
794 | d_printf(6, dev, "D1 message:\n"); | ||
795 | d_dump(6, dev, _d1, sizeof(*_d1) | ||
796 | + sizeof(struct wlp_attr_uuid_e) | ||
797 | + sizeof(struct wlp_attr_wss_sel_mthd) | ||
798 | + sizeof(struct wlp_attr_dev_name) | ||
799 | + strlen(info->name) | ||
800 | + sizeof(struct wlp_attr_manufacturer) | ||
801 | + strlen(info->manufacturer) | ||
802 | + sizeof(struct wlp_attr_model_name) | ||
803 | + strlen(info->model_name) | ||
804 | + sizeof(struct wlp_attr_model_nr) | ||
805 | + strlen(info->model_nr) | ||
806 | + sizeof(struct wlp_attr_serial) | ||
807 | + strlen(info->serial) | ||
808 | + sizeof(struct wlp_attr_prim_dev_type) | ||
809 | + sizeof(struct wlp_attr_wlp_assc_err)); | ||
810 | *skb = _skb; | 752 | *skb = _skb; |
811 | error: | 753 | error: |
812 | d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); | ||
813 | return result; | 754 | return result; |
814 | } | 755 | } |
815 | 756 | ||
@@ -837,7 +778,6 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
837 | void *d2_itr; | 778 | void *d2_itr; |
838 | size_t mem_needed; | 779 | size_t mem_needed; |
839 | 780 | ||
840 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
841 | if (wlp->dev_info == NULL) { | 781 | if (wlp->dev_info == NULL) { |
842 | result = __wlp_setup_device_info(wlp); | 782 | result = __wlp_setup_device_info(wlp); |
843 | if (result < 0) { | 783 | if (result < 0) { |
@@ -847,24 +787,6 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
847 | } | 787 | } |
848 | } | 788 | } |
849 | info = wlp->dev_info; | 789 | info = wlp->dev_info; |
850 | d_printf(6, dev, "Local properties:\n" | ||
851 | "Device name (%d bytes): %s\n" | ||
852 | "Model name (%d bytes): %s\n" | ||
853 | "Manufacturer (%d bytes): %s\n" | ||
854 | "Model number (%d bytes): %s\n" | ||
855 | "Serial number (%d bytes): %s\n" | ||
856 | "Primary device type: \n" | ||
857 | " Category: %d \n" | ||
858 | " OUI: %02x:%02x:%02x \n" | ||
859 | " OUI Subdivision: %u \n", | ||
860 | (int)strlen(info->name), info->name, | ||
861 | (int)strlen(info->model_name), info->model_name, | ||
862 | (int)strlen(info->manufacturer), info->manufacturer, | ||
863 | (int)strlen(info->model_nr), info->model_nr, | ||
864 | (int)strlen(info->serial), info->serial, | ||
865 | info->prim_dev_type.category, | ||
866 | info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], | ||
867 | info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); | ||
868 | mem_needed = sizeof(*_d2) | 790 | mem_needed = sizeof(*_d2) |
869 | + sizeof(struct wlp_attr_uuid_e) | 791 | + sizeof(struct wlp_attr_uuid_e) |
870 | + sizeof(struct wlp_attr_uuid_r) | 792 | + sizeof(struct wlp_attr_uuid_r) |
@@ -892,7 +814,6 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
892 | goto error; | 814 | goto error; |
893 | } | 815 | } |
894 | _d2 = (void *) _skb->data; | 816 | _d2 = (void *) _skb->data; |
895 | d_printf(6, dev, "D2 starts at %p \n", _d2); | ||
896 | _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 817 | _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
897 | _d2->hdr.type = WLP_FRAME_ASSOCIATION; | 818 | _d2->hdr.type = WLP_FRAME_ASSOCIATION; |
898 | _d2->type = WLP_ASSOC_D2; | 819 | _d2->type = WLP_ASSOC_D2; |
@@ -917,11 +838,8 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
917 | used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); | 838 | used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); |
918 | used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); | 839 | used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); |
919 | skb_put(_skb, sizeof(*_d2) + used); | 840 | skb_put(_skb, sizeof(*_d2) + used); |
920 | d_printf(6, dev, "D2 message:\n"); | ||
921 | d_dump(6, dev, _d2, mem_needed); | ||
922 | *skb = _skb; | 841 | *skb = _skb; |
923 | error: | 842 | error: |
924 | d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); | ||
925 | return result; | 843 | return result; |
926 | } | 844 | } |
927 | 845 | ||
@@ -947,7 +865,6 @@ int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, | |||
947 | struct sk_buff *_skb; | 865 | struct sk_buff *_skb; |
948 | struct wlp_nonce tmp; | 866 | struct wlp_nonce tmp; |
949 | 867 | ||
950 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
951 | _skb = dev_alloc_skb(sizeof(*f0)); | 868 | _skb = dev_alloc_skb(sizeof(*f0)); |
952 | if (_skb == NULL) { | 869 | if (_skb == NULL) { |
953 | dev_err(dev, "WLP: Unable to allocate memory for F0 " | 870 | dev_err(dev, "WLP: Unable to allocate memory for F0 " |
@@ -955,7 +872,6 @@ int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, | |||
955 | goto error_alloc; | 872 | goto error_alloc; |
956 | } | 873 | } |
957 | f0 = (void *) _skb->data; | 874 | f0 = (void *) _skb->data; |
958 | d_printf(6, dev, "F0 starts at %p \n", f0); | ||
959 | f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 875 | f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
960 | f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | 876 | f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; |
961 | f0->f0_hdr.type = WLP_ASSOC_F0; | 877 | f0->f0_hdr.type = WLP_ASSOC_F0; |
@@ -969,7 +885,6 @@ int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, | |||
969 | *skb = _skb; | 885 | *skb = _skb; |
970 | result = 0; | 886 | result = 0; |
971 | error_alloc: | 887 | error_alloc: |
972 | d_fnend(6, dev, "wlp %p, result %d \n", wlp, result); | ||
973 | return result; | 888 | return result; |
974 | } | 889 | } |
975 | 890 | ||
@@ -1242,12 +1157,9 @@ void wlp_handle_d1_frame(struct work_struct *ws) | |||
1242 | enum wlp_wss_sel_mthd sel_mthd = 0; | 1157 | enum wlp_wss_sel_mthd sel_mthd = 0; |
1243 | struct wlp_device_info dev_info; | 1158 | struct wlp_device_info dev_info; |
1244 | enum wlp_assc_error assc_err; | 1159 | enum wlp_assc_error assc_err; |
1245 | char uuid[WLP_WSS_UUID_STRSIZE]; | ||
1246 | struct sk_buff *resp = NULL; | 1160 | struct sk_buff *resp = NULL; |
1247 | 1161 | ||
1248 | /* Parse D1 frame */ | 1162 | /* Parse D1 frame */ |
1249 | d_fnstart(6, dev, "WLP: handle D1 frame. wlp = %p, skb = %p\n", | ||
1250 | wlp, skb); | ||
1251 | mutex_lock(&wss->mutex); | 1163 | mutex_lock(&wss->mutex); |
1252 | mutex_lock(&wlp->mutex); /* to access wlp->uuid */ | 1164 | mutex_lock(&wlp->mutex); /* to access wlp->uuid */ |
1253 | memset(&dev_info, 0, sizeof(dev_info)); | 1165 | memset(&dev_info, 0, sizeof(dev_info)); |
@@ -1258,30 +1170,6 @@ void wlp_handle_d1_frame(struct work_struct *ws) | |||
1258 | kfree_skb(skb); | 1170 | kfree_skb(skb); |
1259 | goto out; | 1171 | goto out; |
1260 | } | 1172 | } |
1261 | wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e); | ||
1262 | d_printf(6, dev, "From D1 frame:\n" | ||
1263 | "UUID-E: %s\n" | ||
1264 | "Selection method: %d\n" | ||
1265 | "Device name (%d bytes): %s\n" | ||
1266 | "Model name (%d bytes): %s\n" | ||
1267 | "Manufacturer (%d bytes): %s\n" | ||
1268 | "Model number (%d bytes): %s\n" | ||
1269 | "Serial number (%d bytes): %s\n" | ||
1270 | "Primary device type: \n" | ||
1271 | " Category: %d \n" | ||
1272 | " OUI: %02x:%02x:%02x \n" | ||
1273 | " OUI Subdivision: %u \n", | ||
1274 | uuid, sel_mthd, | ||
1275 | (int)strlen(dev_info.name), dev_info.name, | ||
1276 | (int)strlen(dev_info.model_name), dev_info.model_name, | ||
1277 | (int)strlen(dev_info.manufacturer), dev_info.manufacturer, | ||
1278 | (int)strlen(dev_info.model_nr), dev_info.model_nr, | ||
1279 | (int)strlen(dev_info.serial), dev_info.serial, | ||
1280 | dev_info.prim_dev_type.category, | ||
1281 | dev_info.prim_dev_type.OUI[0], | ||
1282 | dev_info.prim_dev_type.OUI[1], | ||
1283 | dev_info.prim_dev_type.OUI[2], | ||
1284 | dev_info.prim_dev_type.OUIsubdiv); | ||
1285 | 1173 | ||
1286 | kfree_skb(skb); | 1174 | kfree_skb(skb); |
1287 | if (!wlp_uuid_is_set(&wlp->uuid)) { | 1175 | if (!wlp_uuid_is_set(&wlp->uuid)) { |
@@ -1316,7 +1204,6 @@ out: | |||
1316 | kfree(frame_ctx); | 1204 | kfree(frame_ctx); |
1317 | mutex_unlock(&wlp->mutex); | 1205 | mutex_unlock(&wlp->mutex); |
1318 | mutex_unlock(&wss->mutex); | 1206 | mutex_unlock(&wss->mutex); |
1319 | d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp); | ||
1320 | } | 1207 | } |
1321 | 1208 | ||
1322 | /** | 1209 | /** |
@@ -1546,10 +1433,8 @@ int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb, | |||
1546 | void *ptr = skb->data; | 1433 | void *ptr = skb->data; |
1547 | size_t len = skb->len; | 1434 | size_t len = skb->len; |
1548 | size_t used; | 1435 | size_t used; |
1549 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
1550 | struct wlp_frame_assoc *assoc = ptr; | 1436 | struct wlp_frame_assoc *assoc = ptr; |
1551 | 1437 | ||
1552 | d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); | ||
1553 | used = sizeof(*assoc); | 1438 | used = sizeof(*assoc); |
1554 | result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); | 1439 | result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); |
1555 | if (result < 0) { | 1440 | if (result < 0) { |
@@ -1572,14 +1457,7 @@ int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb, | |||
1572 | wlp_assoc_frame_str(assoc->type)); | 1457 | wlp_assoc_frame_str(assoc->type)); |
1573 | goto error_parse; | 1458 | goto error_parse; |
1574 | } | 1459 | } |
1575 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
1576 | d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt " | ||
1577 | "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag, | ||
1578 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
1579 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); | ||
1580 | |||
1581 | error_parse: | 1460 | error_parse: |
1582 | d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); | ||
1583 | return result; | 1461 | return result; |
1584 | } | 1462 | } |
1585 | 1463 | ||
@@ -1600,7 +1478,6 @@ int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, | |||
1600 | } *c; | 1478 | } *c; |
1601 | struct sk_buff *_skb; | 1479 | struct sk_buff *_skb; |
1602 | 1480 | ||
1603 | d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); | ||
1604 | _skb = dev_alloc_skb(sizeof(*c)); | 1481 | _skb = dev_alloc_skb(sizeof(*c)); |
1605 | if (_skb == NULL) { | 1482 | if (_skb == NULL) { |
1606 | dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " | 1483 | dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " |
@@ -1608,7 +1485,6 @@ int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, | |||
1608 | goto error_alloc; | 1485 | goto error_alloc; |
1609 | } | 1486 | } |
1610 | c = (void *) _skb->data; | 1487 | c = (void *) _skb->data; |
1611 | d_printf(6, dev, "C1/C2 starts at %p \n", c); | ||
1612 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 1488 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
1613 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | 1489 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; |
1614 | c->c_hdr.type = type; | 1490 | c->c_hdr.type = type; |
@@ -1616,12 +1492,9 @@ int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, | |||
1616 | wlp_set_msg_type(&c->c_hdr.msg_type, type); | 1492 | wlp_set_msg_type(&c->c_hdr.msg_type, type); |
1617 | wlp_set_wssid(&c->wssid, &wss->wssid); | 1493 | wlp_set_wssid(&c->wssid, &wss->wssid); |
1618 | skb_put(_skb, sizeof(*c)); | 1494 | skb_put(_skb, sizeof(*c)); |
1619 | d_printf(6, dev, "C1/C2 message:\n"); | ||
1620 | d_dump(6, dev, c, sizeof(*c)); | ||
1621 | *skb = _skb; | 1495 | *skb = _skb; |
1622 | result = 0; | 1496 | result = 0; |
1623 | error_alloc: | 1497 | error_alloc: |
1624 | d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); | ||
1625 | return result; | 1498 | return result; |
1626 | } | 1499 | } |
1627 | 1500 | ||
@@ -1660,7 +1533,6 @@ int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, | |||
1660 | } *c; | 1533 | } *c; |
1661 | struct sk_buff *_skb; | 1534 | struct sk_buff *_skb; |
1662 | 1535 | ||
1663 | d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); | ||
1664 | _skb = dev_alloc_skb(sizeof(*c)); | 1536 | _skb = dev_alloc_skb(sizeof(*c)); |
1665 | if (_skb == NULL) { | 1537 | if (_skb == NULL) { |
1666 | dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " | 1538 | dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " |
@@ -1668,7 +1540,6 @@ int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, | |||
1668 | goto error_alloc; | 1540 | goto error_alloc; |
1669 | } | 1541 | } |
1670 | c = (void *) _skb->data; | 1542 | c = (void *) _skb->data; |
1671 | d_printf(6, dev, "C3/C4 starts at %p \n", c); | ||
1672 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 1543 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
1673 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | 1544 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; |
1674 | c->c_hdr.type = type; | 1545 | c->c_hdr.type = type; |
@@ -1678,12 +1549,9 @@ int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, | |||
1678 | wlp_set_wss_tag(&c->wss_tag, wss->tag); | 1549 | wlp_set_wss_tag(&c->wss_tag, wss->tag); |
1679 | wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); | 1550 | wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); |
1680 | skb_put(_skb, sizeof(*c)); | 1551 | skb_put(_skb, sizeof(*c)); |
1681 | d_printf(6, dev, "C3/C4 message:\n"); | ||
1682 | d_dump(6, dev, c, sizeof(*c)); | ||
1683 | *skb = _skb; | 1552 | *skb = _skb; |
1684 | result = 0; | 1553 | result = 0; |
1685 | error_alloc: | 1554 | error_alloc: |
1686 | d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); | ||
1687 | return result; | 1555 | return result; |
1688 | } | 1556 | } |
1689 | 1557 | ||
@@ -1709,10 +1577,7 @@ static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \ | |||
1709 | struct device *dev = &wlp->rc->uwb_dev.dev; \ | 1577 | struct device *dev = &wlp->rc->uwb_dev.dev; \ |
1710 | int result; \ | 1578 | int result; \ |
1711 | struct sk_buff *skb = NULL; \ | 1579 | struct sk_buff *skb = NULL; \ |
1712 | d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ | 1580 | \ |
1713 | wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ | ||
1714 | d_printf(6, dev, "WLP: Constructing %s frame. \n", \ | ||
1715 | wlp_assoc_frame_str(id)); \ | ||
1716 | /* Build the frame */ \ | 1581 | /* Build the frame */ \ |
1717 | result = wlp_build_assoc_##type(wlp, wss, &skb); \ | 1582 | result = wlp_build_assoc_##type(wlp, wss, &skb); \ |
1718 | if (result < 0) { \ | 1583 | if (result < 0) { \ |
@@ -1721,9 +1586,6 @@ static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \ | |||
1721 | goto error_build_assoc; \ | 1586 | goto error_build_assoc; \ |
1722 | } \ | 1587 | } \ |
1723 | /* Send the frame */ \ | 1588 | /* Send the frame */ \ |
1724 | d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \ | ||
1725 | wlp_assoc_frame_str(id), \ | ||
1726 | dev_addr->data[1], dev_addr->data[0]); \ | ||
1727 | BUG_ON(wlp->xmit_frame == NULL); \ | 1589 | BUG_ON(wlp->xmit_frame == NULL); \ |
1728 | result = wlp->xmit_frame(wlp, skb, dev_addr); \ | 1590 | result = wlp->xmit_frame(wlp, skb, dev_addr); \ |
1729 | if (result < 0) { \ | 1591 | if (result < 0) { \ |
@@ -1740,8 +1602,6 @@ error_xmit: \ | |||
1740 | /* We could try again ... */ \ | 1602 | /* We could try again ... */ \ |
1741 | dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ | 1603 | dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ |
1742 | error_build_assoc: \ | 1604 | error_build_assoc: \ |
1743 | d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ | ||
1744 | wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ | ||
1745 | return result; \ | 1605 | return result; \ |
1746 | } | 1606 | } |
1747 | 1607 | ||
@@ -1794,12 +1654,9 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1794 | struct uwb_dev_addr *src = &frame_ctx->src; | 1654 | struct uwb_dev_addr *src = &frame_ctx->src; |
1795 | int result; | 1655 | int result; |
1796 | struct wlp_uuid wssid; | 1656 | struct wlp_uuid wssid; |
1797 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
1798 | struct sk_buff *resp = NULL; | 1657 | struct sk_buff *resp = NULL; |
1799 | 1658 | ||
1800 | /* Parse C1 frame */ | 1659 | /* Parse C1 frame */ |
1801 | d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n", | ||
1802 | wlp, c1); | ||
1803 | mutex_lock(&wss->mutex); | 1660 | mutex_lock(&wss->mutex); |
1804 | result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, | 1661 | result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, |
1805 | len - sizeof(*c1)); | 1662 | len - sizeof(*c1)); |
@@ -1807,12 +1664,8 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1807 | dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); | 1664 | dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); |
1808 | goto out; | 1665 | goto out; |
1809 | } | 1666 | } |
1810 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); | ||
1811 | d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf); | ||
1812 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) | 1667 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) |
1813 | && wss->state == WLP_WSS_STATE_ACTIVE) { | 1668 | && wss->state == WLP_WSS_STATE_ACTIVE) { |
1814 | d_printf(6, dev, "WSSID from C1 frame is known locally " | ||
1815 | "and is active\n"); | ||
1816 | /* Construct C2 frame */ | 1669 | /* Construct C2 frame */ |
1817 | result = wlp_build_assoc_c2(wlp, wss, &resp); | 1670 | result = wlp_build_assoc_c2(wlp, wss, &resp); |
1818 | if (result < 0) { | 1671 | if (result < 0) { |
@@ -1820,8 +1673,6 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1820 | goto out; | 1673 | goto out; |
1821 | } | 1674 | } |
1822 | } else { | 1675 | } else { |
1823 | d_printf(6, dev, "WSSID from C1 frame is not known locally " | ||
1824 | "or is not active\n"); | ||
1825 | /* Construct F0 frame */ | 1676 | /* Construct F0 frame */ |
1826 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); | 1677 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); |
1827 | if (result < 0) { | 1678 | if (result < 0) { |
@@ -1830,8 +1681,6 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1830 | } | 1681 | } |
1831 | } | 1682 | } |
1832 | /* Send C2 frame */ | 1683 | /* Send C2 frame */ |
1833 | d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n", | ||
1834 | src->data[1], src->data[0]); | ||
1835 | BUG_ON(wlp->xmit_frame == NULL); | 1684 | BUG_ON(wlp->xmit_frame == NULL); |
1836 | result = wlp->xmit_frame(wlp, resp, src); | 1685 | result = wlp->xmit_frame(wlp, resp, src); |
1837 | if (result < 0) { | 1686 | if (result < 0) { |
@@ -1846,7 +1695,6 @@ out: | |||
1846 | kfree_skb(frame_ctx->skb); | 1695 | kfree_skb(frame_ctx->skb); |
1847 | kfree(frame_ctx); | 1696 | kfree(frame_ctx); |
1848 | mutex_unlock(&wss->mutex); | 1697 | mutex_unlock(&wss->mutex); |
1849 | d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp); | ||
1850 | } | 1698 | } |
1851 | 1699 | ||
1852 | /** | 1700 | /** |
@@ -1868,27 +1716,20 @@ void wlp_handle_c3_frame(struct work_struct *ws) | |||
1868 | struct sk_buff *skb = frame_ctx->skb; | 1716 | struct sk_buff *skb = frame_ctx->skb; |
1869 | struct uwb_dev_addr *src = &frame_ctx->src; | 1717 | struct uwb_dev_addr *src = &frame_ctx->src; |
1870 | int result; | 1718 | int result; |
1871 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
1872 | struct sk_buff *resp = NULL; | 1719 | struct sk_buff *resp = NULL; |
1873 | struct wlp_uuid wssid; | 1720 | struct wlp_uuid wssid; |
1874 | u8 tag; | 1721 | u8 tag; |
1875 | struct uwb_mac_addr virt_addr; | 1722 | struct uwb_mac_addr virt_addr; |
1876 | 1723 | ||
1877 | /* Parse C3 frame */ | 1724 | /* Parse C3 frame */ |
1878 | d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", | ||
1879 | wlp, skb); | ||
1880 | mutex_lock(&wss->mutex); | 1725 | mutex_lock(&wss->mutex); |
1881 | result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); | 1726 | result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); |
1882 | if (result < 0) { | 1727 | if (result < 0) { |
1883 | dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); | 1728 | dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); |
1884 | goto out; | 1729 | goto out; |
1885 | } | 1730 | } |
1886 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); | ||
1887 | d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf); | ||
1888 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) | 1731 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) |
1889 | && wss->state >= WLP_WSS_STATE_ACTIVE) { | 1732 | && wss->state >= WLP_WSS_STATE_ACTIVE) { |
1890 | d_printf(6, dev, "WSSID from C3 frame is known locally " | ||
1891 | "and is active\n"); | ||
1892 | result = wlp_eda_update_node(&wlp->eda, src, wss, | 1733 | result = wlp_eda_update_node(&wlp->eda, src, wss, |
1893 | (void *) virt_addr.data, tag, | 1734 | (void *) virt_addr.data, tag, |
1894 | WLP_WSS_CONNECTED); | 1735 | WLP_WSS_CONNECTED); |
@@ -1913,8 +1754,6 @@ void wlp_handle_c3_frame(struct work_struct *ws) | |||
1913 | } | 1754 | } |
1914 | } | 1755 | } |
1915 | } else { | 1756 | } else { |
1916 | d_printf(6, dev, "WSSID from C3 frame is not known locally " | ||
1917 | "or is not active\n"); | ||
1918 | /* Construct F0 frame */ | 1757 | /* Construct F0 frame */ |
1919 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); | 1758 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); |
1920 | if (result < 0) { | 1759 | if (result < 0) { |
@@ -1923,8 +1762,6 @@ void wlp_handle_c3_frame(struct work_struct *ws) | |||
1923 | } | 1762 | } |
1924 | } | 1763 | } |
1925 | /* Send C4 frame */ | 1764 | /* Send C4 frame */ |
1926 | d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n", | ||
1927 | src->data[1], src->data[0]); | ||
1928 | BUG_ON(wlp->xmit_frame == NULL); | 1765 | BUG_ON(wlp->xmit_frame == NULL); |
1929 | result = wlp->xmit_frame(wlp, resp, src); | 1766 | result = wlp->xmit_frame(wlp, resp, src); |
1930 | if (result < 0) { | 1767 | if (result < 0) { |
@@ -1939,8 +1776,6 @@ out: | |||
1939 | kfree_skb(frame_ctx->skb); | 1776 | kfree_skb(frame_ctx->skb); |
1940 | kfree(frame_ctx); | 1777 | kfree(frame_ctx); |
1941 | mutex_unlock(&wss->mutex); | 1778 | mutex_unlock(&wss->mutex); |
1942 | d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", | ||
1943 | wlp, skb); | ||
1944 | } | 1779 | } |
1945 | 1780 | ||
1946 | 1781 | ||
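The bulk of the messages.c churn above is mechanical: the driver-private d_fnstart()/d_fnend()/d_printf()/d_dump() tracing from <linux/uwb/debug.h> is deleted, and the few messages worth keeping become dev_dbg() calls. Roughly, using a representative before/after already present in the hunks rather than a new message:

/* Before: only compiled in when the file-local D_LOCAL level is >= 2. */
d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n", buf);

/* After: standard device debug message; emitted only when the driver is
 * built with DEBUG (or enabled via dynamic debug on kernels that have it). */
dev_dbg(dev, "WLP: Found WSS %s. Enrolling.\n", buf);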
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c index 1bb9b1f97d47..0370399ff4bb 100644 --- a/drivers/uwb/wlp/sysfs.c +++ b/drivers/uwb/wlp/sysfs.c | |||
@@ -23,8 +23,8 @@ | |||
23 | * FIXME: Docs | 23 | * FIXME: Docs |
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | |||
27 | #include <linux/wlp.h> | 26 | #include <linux/wlp.h> |
27 | |||
28 | #include "wlp-internal.h" | 28 | #include "wlp-internal.h" |
29 | 29 | ||
30 | static | 30 | static |
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c index c701bd1a2887..cd2035768b47 100644 --- a/drivers/uwb/wlp/txrx.c +++ b/drivers/uwb/wlp/txrx.c | |||
@@ -26,12 +26,10 @@ | |||
26 | 26 | ||
27 | #include <linux/etherdevice.h> | 27 | #include <linux/etherdevice.h> |
28 | #include <linux/wlp.h> | 28 | #include <linux/wlp.h> |
29 | #define D_LOCAL 5 | ||
30 | #include <linux/uwb/debug.h> | ||
31 | #include "wlp-internal.h" | ||
32 | 29 | ||
30 | #include "wlp-internal.h" | ||
33 | 31 | ||
34 | /** | 32 | /* |
35 | * Direct incoming association msg to correct parsing routine | 33 | * Direct incoming association msg to correct parsing routine |
36 | * | 34 | * |
37 | * We only expect D1, E1, C1, C3 messages as new. All other incoming | 35 | * We only expect D1, E1, C1, C3 messages as new. All other incoming |
@@ -48,35 +46,31 @@ void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
48 | struct device *dev = &wlp->rc->uwb_dev.dev; | 46 | struct device *dev = &wlp->rc->uwb_dev.dev; |
49 | struct wlp_frame_assoc *assoc = (void *) skb->data; | 47 | struct wlp_frame_assoc *assoc = (void *) skb->data; |
50 | struct wlp_assoc_frame_ctx *frame_ctx; | 48 | struct wlp_assoc_frame_ctx *frame_ctx; |
51 | d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); | 49 | |
52 | frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); | 50 | frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); |
53 | if (frame_ctx == NULL) { | 51 | if (frame_ctx == NULL) { |
54 | dev_err(dev, "WLP: Unable to allocate memory for association " | 52 | dev_err(dev, "WLP: Unable to allocate memory for association " |
55 | "frame handling.\n"); | 53 | "frame handling.\n"); |
56 | kfree_skb(skb); | 54 | kfree_skb(skb); |
57 | goto out; | 55 | return; |
58 | } | 56 | } |
59 | frame_ctx->wlp = wlp; | 57 | frame_ctx->wlp = wlp; |
60 | frame_ctx->skb = skb; | 58 | frame_ctx->skb = skb; |
61 | frame_ctx->src = *src; | 59 | frame_ctx->src = *src; |
62 | switch (assoc->type) { | 60 | switch (assoc->type) { |
63 | case WLP_ASSOC_D1: | 61 | case WLP_ASSOC_D1: |
64 | d_printf(5, dev, "Received a D1 frame.\n"); | ||
65 | INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); | 62 | INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); |
66 | schedule_work(&frame_ctx->ws); | 63 | schedule_work(&frame_ctx->ws); |
67 | break; | 64 | break; |
68 | case WLP_ASSOC_E1: | 65 | case WLP_ASSOC_E1: |
69 | d_printf(5, dev, "Received a E1 frame. FIXME?\n"); | ||
70 | kfree_skb(skb); /* Temporary until we handle it */ | 66 | kfree_skb(skb); /* Temporary until we handle it */ |
71 | kfree(frame_ctx); /* Temporary until we handle it */ | 67 | kfree(frame_ctx); /* Temporary until we handle it */ |
72 | break; | 68 | break; |
73 | case WLP_ASSOC_C1: | 69 | case WLP_ASSOC_C1: |
74 | d_printf(5, dev, "Received a C1 frame.\n"); | ||
75 | INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); | 70 | INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); |
76 | schedule_work(&frame_ctx->ws); | 71 | schedule_work(&frame_ctx->ws); |
77 | break; | 72 | break; |
78 | case WLP_ASSOC_C3: | 73 | case WLP_ASSOC_C3: |
79 | d_printf(5, dev, "Received a C3 frame.\n"); | ||
80 | INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); | 74 | INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); |
81 | schedule_work(&frame_ctx->ws); | 75 | schedule_work(&frame_ctx->ws); |
82 | break; | 76 | break; |
@@ -87,11 +81,9 @@ void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
87 | kfree(frame_ctx); | 81 | kfree(frame_ctx); |
88 | break; | 82 | break; |
89 | } | 83 | } |
90 | out: | ||
91 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
92 | } | 84 | } |
93 | 85 | ||
94 | /** | 86 | /* |
95 | * Process incoming association frame | 87 | * Process incoming association frame |
96 | * | 88 | * |
97 | * Although it could be possible to deal with some incoming association | 89 | * Although it could be possible to deal with some incoming association |
@@ -112,7 +104,6 @@ void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
112 | struct wlp_frame_assoc *assoc = (void *) skb->data; | 104 | struct wlp_frame_assoc *assoc = (void *) skb->data; |
113 | struct wlp_session *session = wlp->session; | 105 | struct wlp_session *session = wlp->session; |
114 | u8 version; | 106 | u8 version; |
115 | d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); | ||
116 | 107 | ||
117 | if (wlp_get_version(wlp, &assoc->version, &version, | 108 | if (wlp_get_version(wlp, &assoc->version, &version, |
118 | sizeof(assoc->version)) < 0) | 109 | sizeof(assoc->version)) < 0) |
@@ -150,14 +141,12 @@ void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
150 | } else { | 141 | } else { |
151 | wlp_direct_assoc_frame(wlp, skb, src); | 142 | wlp_direct_assoc_frame(wlp, skb, src); |
152 | } | 143 | } |
153 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
154 | return; | 144 | return; |
155 | error: | 145 | error: |
156 | kfree_skb(skb); | 146 | kfree_skb(skb); |
157 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
158 | } | 147 | } |
159 | 148 | ||
160 | /** | 149 | /* |
161 | * Verify incoming frame is from connected neighbor, prep to pass to WLP client | 150 | * Verify incoming frame is from connected neighbor, prep to pass to WLP client |
162 | * | 151 | * |
163 | * Verification proceeds according to WLP 0.99 [7.3.1]. The source address | 152 | * Verification proceeds according to WLP 0.99 [7.3.1]. The source address |
@@ -176,7 +165,6 @@ int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb, | |||
176 | struct wlp_eda_node eda_entry; | 165 | struct wlp_eda_node eda_entry; |
177 | struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; | 166 | struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; |
178 | 167 | ||
179 | d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); | ||
180 | /*verify*/ | 168 | /*verify*/ |
181 | result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); | 169 | result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); |
182 | if (result < 0) { | 170 | if (result < 0) { |
@@ -207,11 +195,10 @@ int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb, | |||
207 | /*prep*/ | 195 | /*prep*/ |
208 | skb_pull(skb, sizeof(*hdr)); | 196 | skb_pull(skb, sizeof(*hdr)); |
209 | out: | 197 | out: |
210 | d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); | ||
211 | return result; | 198 | return result; |
212 | } | 199 | } |
213 | 200 | ||
214 | /** | 201 | /* |
215 | * Receive a WLP frame from device | 202 | * Receive a WLP frame from device |
216 | * | 203 | * |
217 | * @returns: 1 if calling function should free the skb | 204 | * @returns: 1 if calling function should free the skb |
@@ -226,14 +213,12 @@ int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb, | |||
226 | struct wlp_frame_hdr *hdr; | 213 | struct wlp_frame_hdr *hdr; |
227 | int result = 0; | 214 | int result = 0; |
228 | 215 | ||
229 | d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len); | ||
230 | if (len < sizeof(*hdr)) { | 216 | if (len < sizeof(*hdr)) { |
231 | dev_err(dev, "Not enough data to parse WLP header.\n"); | 217 | dev_err(dev, "Not enough data to parse WLP header.\n"); |
232 | result = -EINVAL; | 218 | result = -EINVAL; |
233 | goto out; | 219 | goto out; |
234 | } | 220 | } |
235 | hdr = ptr; | 221 | hdr = ptr; |
236 | d_dump(6, dev, hdr, sizeof(*hdr)); | ||
237 | if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { | 222 | if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { |
238 | dev_err(dev, "Not a WLP frame type.\n"); | 223 | dev_err(dev, "Not a WLP frame type.\n"); |
239 | result = -EINVAL; | 224 | result = -EINVAL; |
@@ -270,7 +255,6 @@ int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb, | |||
270 | "WLP header.\n"); | 255 | "WLP header.\n"); |
271 | goto out; | 256 | goto out; |
272 | } | 257 | } |
273 | d_printf(5, dev, "Association frame received.\n"); | ||
274 | wlp_receive_assoc_frame(wlp, skb, src); | 258 | wlp_receive_assoc_frame(wlp, skb, src); |
275 | break; | 259 | break; |
276 | default: | 260 | default: |
@@ -283,13 +267,12 @@ out: | |||
283 | kfree_skb(skb); | 267 | kfree_skb(skb); |
284 | result = 0; | 268 | result = 0; |
285 | } | 269 | } |
286 | d_fnend(6, dev, "skb (%p)\n", skb); | ||
287 | return result; | 270 | return result; |
288 | } | 271 | } |
289 | EXPORT_SYMBOL_GPL(wlp_receive_frame); | 272 | EXPORT_SYMBOL_GPL(wlp_receive_frame); |
290 | 273 | ||
291 | 274 | ||
292 | /** | 275 | /* |
293 | * Verify frame from network stack, prepare for further transmission | 276 | * Verify frame from network stack, prepare for further transmission |
294 | * | 277 | * |
295 | * @skb: the socket buffer that needs to be prepared for transmission (it | 278 | * @skb: the socket buffer that needs to be prepared for transmission (it |
@@ -343,9 +326,7 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
343 | int result = -EINVAL; | 326 | int result = -EINVAL; |
344 | struct ethhdr *eth_hdr = (void *) skb->data; | 327 | struct ethhdr *eth_hdr = (void *) skb->data; |
345 | 328 | ||
346 | d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb); | ||
347 | if (is_broadcast_ether_addr(eth_hdr->h_dest)) { | 329 | if (is_broadcast_ether_addr(eth_hdr->h_dest)) { |
348 | d_printf(6, dev, "WLP: handling broadcast frame. \n"); | ||
349 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); | 330 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); |
350 | if (result < 0) { | 331 | if (result < 0) { |
351 | if (printk_ratelimit()) | 332 | if (printk_ratelimit()) |
@@ -357,7 +338,6 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
357 | result = 1; | 338 | result = 1; |
358 | /* Frame will be transmitted by WLP. */ | 339 | /* Frame will be transmitted by WLP. */ |
359 | } else { | 340 | } else { |
360 | d_printf(6, dev, "WLP: handling unicast frame. \n"); | ||
361 | result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, | 341 | result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, |
362 | wlp_wss_prep_hdr, skb); | 342 | wlp_wss_prep_hdr, skb); |
363 | if (unlikely(result < 0)) { | 343 | if (unlikely(result < 0)) { |
@@ -368,7 +348,6 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
368 | } | 348 | } |
369 | } | 349 | } |
370 | out: | 350 | out: |
371 | d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result); | ||
372 | return result; | 351 | return result; |
373 | } | 352 | } |
374 | EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame); | 353 | EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame); |
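One pattern worth noting in the txrx.c hunks: incoming D1/C1/C3 association frames are not parsed in the receive path itself. The driver allocates a small context with GFP_ATOMIC and defers the real work to a workqueue, since the handlers take mutexes and transmit reply frames. A condensed sketch of that hand-off, with error handling trimmed and the handler assumed to free both the skb and the context:

/* Sketch: defer association-frame handling to process context. */
frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC);
if (frame_ctx == NULL) {
        kfree_skb(skb);                 /* drop the frame on OOM */
        return;
}
frame_ctx->wlp = wlp;
frame_ctx->skb = skb;
frame_ctx->src = *src;
INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame);
schedule_work(&frame_ctx->ws);          /* handler frees skb and frame_ctx */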
diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h index 1c94fabfb1a7..3e8d5de7c5b9 100644 --- a/drivers/uwb/wlp/wlp-internal.h +++ b/drivers/uwb/wlp/wlp-internal.h | |||
@@ -42,10 +42,6 @@ enum wlp_wss_connect { | |||
42 | extern struct kobj_type wss_ktype; | 42 | extern struct kobj_type wss_ktype; |
43 | extern struct attribute_group wss_attr_group; | 43 | extern struct attribute_group wss_attr_group; |
44 | 44 | ||
45 | extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t); | ||
46 | extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); | ||
47 | |||
48 | |||
49 | /* This should be changed to a dynamic array where entries are sorted | 45 | /* This should be changed to a dynamic array where entries are sorted |
50 | * by eth_addr and search is done in a binary form | 46 | * by eth_addr and search is done in a binary form |
51 | * | 47 | * |
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c index 0799402e73fb..13db739c4e39 100644 --- a/drivers/uwb/wlp/wlp-lc.c +++ b/drivers/uwb/wlp/wlp-lc.c | |||
@@ -21,12 +21,9 @@ | |||
21 | * | 21 | * |
22 | * FIXME: docs | 22 | * FIXME: docs |
23 | */ | 23 | */ |
24 | |||
25 | #include <linux/wlp.h> | 24 | #include <linux/wlp.h> |
26 | #define D_LOCAL 6 | ||
27 | #include <linux/uwb/debug.h> | ||
28 | #include "wlp-internal.h" | ||
29 | 25 | ||
26 | #include "wlp-internal.h" | ||
30 | 27 | ||
31 | static | 28 | static |
32 | void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) | 29 | void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) |
@@ -61,11 +58,6 @@ int __wlp_alloc_device_info(struct wlp *wlp) | |||
61 | static | 58 | static |
62 | void __wlp_fill_device_info(struct wlp *wlp) | 59 | void __wlp_fill_device_info(struct wlp *wlp) |
63 | { | 60 | { |
64 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
65 | |||
66 | BUG_ON(wlp->fill_device_info == NULL); | ||
67 | d_printf(6, dev, "Retrieving device information " | ||
68 | "from device driver.\n"); | ||
69 | wlp->fill_device_info(wlp, wlp->dev_info); | 61 | wlp->fill_device_info(wlp, wlp->dev_info); |
70 | } | 62 | } |
71 | 63 | ||
@@ -127,7 +119,7 @@ void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor) | |||
127 | } | 119 | } |
128 | } | 120 | } |
129 | 121 | ||
130 | /** | 122 | /* |
131 | * Populate WLP neighborhood cache with neighbor information | 123 | * Populate WLP neighborhood cache with neighbor information |
132 | * | 124 | * |
133 | * A new neighbor is found. If it is discoverable then we add it to the | 125 | * A new neighbor is found. If it is discoverable then we add it to the |
@@ -141,10 +133,7 @@ int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev) | |||
141 | int discoverable; | 133 | int discoverable; |
142 | struct wlp_neighbor_e *neighbor; | 134 | struct wlp_neighbor_e *neighbor; |
143 | 135 | ||
144 | d_fnstart(6, &dev->dev, "uwb %p \n", dev); | 136 | /* |
145 | d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n", | ||
146 | dev->dev_addr.data[1], dev->dev_addr.data[0]); | ||
147 | /** | ||
148 | * FIXME: | 137 | * FIXME: |
149 | * Use contents of WLP IE found in beacon cache to determine if | 138 | * Use contents of WLP IE found in beacon cache to determine if |
150 | * neighbor is discoverable. | 139 | * neighbor is discoverable. |
@@ -167,7 +156,6 @@ int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev) | |||
167 | list_add(&neighbor->node, &wlp->neighbors); | 156 | list_add(&neighbor->node, &wlp->neighbors); |
168 | } | 157 | } |
169 | error_no_mem: | 158 | error_no_mem: |
170 | d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result); | ||
171 | return result; | 159 | return result; |
172 | } | 160 | } |
173 | 161 | ||
@@ -255,8 +243,6 @@ int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
255 | dev_err(dev, "Unable to send D1 frame to neighbor " | 243 | dev_err(dev, "Unable to send D1 frame to neighbor " |
256 | "%02x:%02x (%d)\n", dev_addr->data[1], | 244 | "%02x:%02x (%d)\n", dev_addr->data[1], |
257 | dev_addr->data[0], result); | 245 | dev_addr->data[0], result); |
258 | d_printf(6, dev, "Add placeholders into buffer next to " | ||
259 | "neighbor information we have (dev address).\n"); | ||
260 | goto out; | 246 | goto out; |
261 | } | 247 | } |
262 | /* Create session, wait for response */ | 248 | /* Create session, wait for response */ |
@@ -284,8 +270,6 @@ int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
284 | /* Parse message in session->data: it will be either D2 or F0 */ | 270 | /* Parse message in session->data: it will be either D2 or F0 */ |
285 | skb = session.data; | 271 | skb = session.data; |
286 | resp = (void *) skb->data; | 272 | resp = (void *) skb->data; |
287 | d_printf(6, dev, "Received response to D1 frame. \n"); | ||
288 | d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
289 | 273 | ||
290 | if (resp->type == WLP_ASSOC_F0) { | 274 | if (resp->type == WLP_ASSOC_F0) { |
291 | result = wlp_parse_f0(wlp, skb); | 275 | result = wlp_parse_f0(wlp, skb); |
@@ -337,10 +321,9 @@ int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
337 | struct device *dev = &wlp->rc->uwb_dev.dev; | 321 | struct device *dev = &wlp->rc->uwb_dev.dev; |
338 | char buf[WLP_WSS_UUID_STRSIZE]; | 322 | char buf[WLP_WSS_UUID_STRSIZE]; |
339 | struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; | 323 | struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; |
324 | |||
340 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 325 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); |
341 | d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", | 326 | |
342 | wlp, neighbor, wss, wssid, buf); | ||
343 | d_printf(6, dev, "Complete me.\n"); | ||
344 | result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); | 327 | result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); |
345 | if (result < 0) { | 328 | if (result < 0) { |
346 | dev_err(dev, "WLP: D1/D2 message exchange for enrollment " | 329 | dev_err(dev, "WLP: D1/D2 message exchange for enrollment " |
@@ -360,13 +343,10 @@ int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
360 | goto error; | 343 | goto error; |
361 | } else { | 344 | } else { |
362 | wss->state = WLP_WSS_STATE_ENROLLED; | 345 | wss->state = WLP_WSS_STATE_ENROLLED; |
363 | d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS " | 346 | dev_dbg(dev, "WLP: Success Enrollment into unsecure WSS " |
364 | "%s using neighbor %02x:%02x. \n", buf, | 347 | "%s using neighbor %02x:%02x. \n", |
365 | dev_addr->data[1], dev_addr->data[0]); | 348 | buf, dev_addr->data[1], dev_addr->data[0]); |
366 | } | 349 | } |
367 | |||
368 | d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", | ||
369 | wlp, neighbor, wss, wssid, buf); | ||
370 | out: | 350 | out: |
371 | return result; | 351 | return result; |
372 | error: | 352 | error: |
@@ -449,7 +429,6 @@ ssize_t wlp_discover(struct wlp *wlp) | |||
449 | int result = 0; | 429 | int result = 0; |
450 | struct device *dev = &wlp->rc->uwb_dev.dev; | 430 | struct device *dev = &wlp->rc->uwb_dev.dev; |
451 | 431 | ||
452 | d_fnstart(6, dev, "wlp %p \n", wlp); | ||
453 | mutex_lock(&wlp->nbmutex); | 432 | mutex_lock(&wlp->nbmutex); |
454 | /* Clear current neighborhood cache. */ | 433 | /* Clear current neighborhood cache. */ |
455 | __wlp_neighbors_release(wlp); | 434 | __wlp_neighbors_release(wlp); |
@@ -469,7 +448,6 @@ ssize_t wlp_discover(struct wlp *wlp) | |||
469 | } | 448 | } |
470 | error_dev_for_each: | 449 | error_dev_for_each: |
471 | mutex_unlock(&wlp->nbmutex); | 450 | mutex_unlock(&wlp->nbmutex); |
472 | d_fnend(6, dev, "wlp %p \n", wlp); | ||
473 | return result; | 451 | return result; |
474 | } | 452 | } |
475 | 453 | ||
@@ -492,9 +470,6 @@ void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev, | |||
492 | int result; | 470 | int result; |
493 | switch (event) { | 471 | switch (event) { |
494 | case UWB_NOTIF_ONAIR: | 472 | case UWB_NOTIF_ONAIR: |
495 | d_printf(6, dev, "UWB device %02x:%02x is onair\n", | ||
496 | uwb_dev->dev_addr.data[1], | ||
497 | uwb_dev->dev_addr.data[0]); | ||
498 | result = wlp_eda_create_node(&wlp->eda, | 473 | result = wlp_eda_create_node(&wlp->eda, |
499 | uwb_dev->mac_addr.data, | 474 | uwb_dev->mac_addr.data, |
500 | &uwb_dev->dev_addr); | 475 | &uwb_dev->dev_addr); |
@@ -505,18 +480,11 @@ void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev, | |||
505 | uwb_dev->dev_addr.data[0]); | 480 | uwb_dev->dev_addr.data[0]); |
506 | break; | 481 | break; |
507 | case UWB_NOTIF_OFFAIR: | 482 | case UWB_NOTIF_OFFAIR: |
508 | d_printf(6, dev, "UWB device %02x:%02x is offair\n", | ||
509 | uwb_dev->dev_addr.data[1], | ||
510 | uwb_dev->dev_addr.data[0]); | ||
511 | wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); | 483 | wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); |
512 | mutex_lock(&wlp->nbmutex); | 484 | mutex_lock(&wlp->nbmutex); |
513 | list_for_each_entry_safe(neighbor, next, &wlp->neighbors, | 485 | list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { |
514 | node) { | 486 | if (neighbor->uwb_dev == uwb_dev) |
515 | if (neighbor->uwb_dev == uwb_dev) { | ||
516 | d_printf(6, dev, "Removing device from " | ||
517 | "neighborhood.\n"); | ||
518 | __wlp_neighbor_release(neighbor); | 487 | __wlp_neighbor_release(neighbor); |
519 | } | ||
520 | } | 488 | } |
521 | mutex_unlock(&wlp->nbmutex); | 489 | mutex_unlock(&wlp->nbmutex); |
522 | break; | 490 | break; |
@@ -526,38 +494,47 @@ void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev, | |||
526 | } | 494 | } |
527 | } | 495 | } |
528 | 496 | ||
529 | int wlp_setup(struct wlp *wlp, struct uwb_rc *rc) | 497 | static void wlp_channel_changed(struct uwb_pal *pal, int channel) |
498 | { | ||
499 | struct wlp *wlp = container_of(pal, struct wlp, pal); | ||
500 | |||
501 | if (channel < 0) | ||
502 | netif_carrier_off(wlp->ndev); | ||
503 | else | ||
504 | netif_carrier_on(wlp->ndev); | ||
505 | } | ||
506 | |||
507 | int wlp_setup(struct wlp *wlp, struct uwb_rc *rc, struct net_device *ndev) | ||
530 | { | 508 | { |
531 | struct device *dev = &rc->uwb_dev.dev; | ||
532 | int result; | 509 | int result; |
533 | 510 | ||
534 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
535 | BUG_ON(wlp->fill_device_info == NULL); | 511 | BUG_ON(wlp->fill_device_info == NULL); |
536 | BUG_ON(wlp->xmit_frame == NULL); | 512 | BUG_ON(wlp->xmit_frame == NULL); |
537 | BUG_ON(wlp->stop_queue == NULL); | 513 | BUG_ON(wlp->stop_queue == NULL); |
538 | BUG_ON(wlp->start_queue == NULL); | 514 | BUG_ON(wlp->start_queue == NULL); |
515 | |||
539 | wlp->rc = rc; | 516 | wlp->rc = rc; |
517 | wlp->ndev = ndev; | ||
540 | wlp_eda_init(&wlp->eda);/* Set up address cache */ | 518 | wlp_eda_init(&wlp->eda);/* Set up address cache */ |
541 | wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; | 519 | wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; |
542 | wlp->uwb_notifs_handler.data = wlp; | 520 | wlp->uwb_notifs_handler.data = wlp; |
543 | uwb_notifs_register(rc, &wlp->uwb_notifs_handler); | 521 | uwb_notifs_register(rc, &wlp->uwb_notifs_handler); |
544 | 522 | ||
545 | uwb_pal_init(&wlp->pal); | 523 | uwb_pal_init(&wlp->pal); |
546 | result = uwb_pal_register(rc, &wlp->pal); | 524 | wlp->pal.rc = rc; |
525 | wlp->pal.channel_changed = wlp_channel_changed; | ||
526 | result = uwb_pal_register(&wlp->pal); | ||
547 | if (result < 0) | 527 | if (result < 0) |
548 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); | 528 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); |
549 | 529 | ||
550 | d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); | ||
551 | return result; | 530 | return result; |
552 | } | 531 | } |
553 | EXPORT_SYMBOL_GPL(wlp_setup); | 532 | EXPORT_SYMBOL_GPL(wlp_setup); |
554 | 533 | ||
555 | void wlp_remove(struct wlp *wlp) | 534 | void wlp_remove(struct wlp *wlp) |
556 | { | 535 | { |
557 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
558 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
559 | wlp_neighbors_release(wlp); | 536 | wlp_neighbors_release(wlp); |
560 | uwb_pal_unregister(wlp->rc, &wlp->pal); | 537 | uwb_pal_unregister(&wlp->pal); |
561 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); | 538 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); |
562 | wlp_eda_release(&wlp->eda); | 539 | wlp_eda_release(&wlp->eda); |
563 | mutex_lock(&wlp->mutex); | 540 | mutex_lock(&wlp->mutex); |
@@ -565,9 +542,6 @@ void wlp_remove(struct wlp *wlp) | |||
565 | kfree(wlp->dev_info); | 542 | kfree(wlp->dev_info); |
566 | mutex_unlock(&wlp->mutex); | 543 | mutex_unlock(&wlp->mutex); |
567 | wlp->rc = NULL; | 544 | wlp->rc = NULL; |
568 | /* We have to use NULL here because this function can be called | ||
569 | * when the device disappeared. */ | ||
570 | d_fnend(6, NULL, "wlp %p\n", wlp); | ||
571 | } | 545 | } |
572 | EXPORT_SYMBOL_GPL(wlp_remove); | 546 | EXPORT_SYMBOL_GPL(wlp_remove); |
573 | 547 | ||
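The wlp-lc.c hunk above reworks the PAL wiring: wlp_setup() now also takes the net_device, installs wlp_channel_changed() as the PAL's channel_changed hook so the carrier state follows whether the radio controller has a channel, and calls the single-argument uwb_pal_register(). A sketch of the driver side under those assumptions; the my_drv_* names are placeholders, not symbols from this patch:

static int my_drv_wlp_start(struct my_drv *drv, struct uwb_rc *rc,
			    struct net_device *ndev)
{
	struct wlp *wlp = &drv->wlp;
	int result;

	/* wlp_setup() BUG_ON()s if any of these callbacks is missing;
	 * the implementations live elsewhere in the client driver. */
	wlp->fill_device_info = my_drv_fill_device_info;
	wlp->xmit_frame       = my_drv_xmit_frame;
	wlp->stop_queue       = my_drv_stop_queue;
	wlp->start_queue      = my_drv_start_queue;

	/*
	 * The net_device is passed in so the PAL's channel_changed hook
	 * (wlp_channel_changed() above) can call netif_carrier_on/off()
	 * as the radio controller gains or loses a channel.
	 */
	result = wlp_setup(wlp, rc, ndev);
	return result;
}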
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c index 96b18c9bd6e9..5913c7a5d922 100644 --- a/drivers/uwb/wlp/wss-lc.c +++ b/drivers/uwb/wlp/wss-lc.c | |||
@@ -43,14 +43,11 @@ | |||
43 | * wlp_wss_release() | 43 | * wlp_wss_release() |
44 | * wlp_wss_reset() | 44 | * wlp_wss_reset() |
45 | */ | 45 | */ |
46 | |||
47 | #include <linux/etherdevice.h> /* for is_valid_ether_addr */ | 46 | #include <linux/etherdevice.h> /* for is_valid_ether_addr */ |
48 | #include <linux/skbuff.h> | 47 | #include <linux/skbuff.h> |
49 | #include <linux/wlp.h> | 48 | #include <linux/wlp.h> |
50 | #define D_LOCAL 5 | ||
51 | #include <linux/uwb/debug.h> | ||
52 | #include "wlp-internal.h" | ||
53 | 49 | ||
50 | #include "wlp-internal.h" | ||
54 | 51 | ||
55 | size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) | 52 | size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) |
56 | { | 53 | { |
@@ -116,9 +113,6 @@ struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss) | |||
116 | */ | 113 | */ |
117 | void wlp_wss_reset(struct wlp_wss *wss) | 114 | void wlp_wss_reset(struct wlp_wss *wss) |
118 | { | 115 | { |
119 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
120 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
121 | d_fnstart(5, dev, "wss (%p) \n", wss); | ||
122 | memset(&wss->wssid, 0, sizeof(wss->wssid)); | 116 | memset(&wss->wssid, 0, sizeof(wss->wssid)); |
123 | wss->hash = 0; | 117 | wss->hash = 0; |
124 | memset(&wss->name[0], 0, sizeof(wss->name)); | 118 | memset(&wss->name[0], 0, sizeof(wss->name)); |
@@ -127,7 +121,6 @@ void wlp_wss_reset(struct wlp_wss *wss) | |||
127 | memset(&wss->master_key[0], 0, sizeof(wss->master_key)); | 121 | memset(&wss->master_key[0], 0, sizeof(wss->master_key)); |
128 | wss->tag = 0; | 122 | wss->tag = 0; |
129 | wss->state = WLP_WSS_STATE_NONE; | 123 | wss->state = WLP_WSS_STATE_NONE; |
130 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
131 | } | 124 | } |
132 | 125 | ||
133 | /** | 126 | /** |
@@ -145,7 +138,6 @@ int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str) | |||
145 | struct device *dev = &wlp->rc->uwb_dev.dev; | 138 | struct device *dev = &wlp->rc->uwb_dev.dev; |
146 | int result; | 139 | int result; |
147 | 140 | ||
148 | d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str); | ||
149 | result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); | 141 | result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); |
150 | if (result < 0) | 142 | if (result < 0) |
151 | return result; | 143 | return result; |
@@ -162,7 +154,6 @@ int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str) | |||
162 | result); | 154 | result); |
163 | goto error_sysfs_create_group; | 155 | goto error_sysfs_create_group; |
164 | } | 156 | } |
165 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
166 | return 0; | 157 | return 0; |
167 | error_sysfs_create_group: | 158 | error_sysfs_create_group: |
168 | 159 | ||
@@ -214,22 +205,14 @@ int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
214 | struct wlp *wlp = container_of(wss, struct wlp, wss); | 205 | struct wlp *wlp = container_of(wss, struct wlp, wss); |
215 | struct device *dev = &wlp->rc->uwb_dev.dev; | 206 | struct device *dev = &wlp->rc->uwb_dev.dev; |
216 | struct wlp_neighbor_e *neighbor; | 207 | struct wlp_neighbor_e *neighbor; |
217 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
218 | int result = -ENXIO; | 208 | int result = -ENXIO; |
219 | struct uwb_dev_addr *dev_addr; | 209 | struct uwb_dev_addr *dev_addr; |
220 | 210 | ||
221 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
222 | d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n", | ||
223 | wss, buf, dest->data[1], dest->data[0]); | ||
224 | mutex_lock(&wlp->nbmutex); | 211 | mutex_lock(&wlp->nbmutex); |
225 | list_for_each_entry(neighbor, &wlp->neighbors, node) { | 212 | list_for_each_entry(neighbor, &wlp->neighbors, node) { |
226 | dev_addr = &neighbor->uwb_dev->dev_addr; | 213 | dev_addr = &neighbor->uwb_dev->dev_addr; |
227 | if (!memcmp(dest, dev_addr, sizeof(*dest))) { | 214 | if (!memcmp(dest, dev_addr, sizeof(*dest))) { |
228 | d_printf(5, dev, "Neighbor %02x:%02x is valid, " | 215 | result = wlp_enroll_neighbor(wlp, neighbor, wss, wssid); |
229 | "enrolling. \n", | ||
230 | dev_addr->data[1], dev_addr->data[0]); | ||
231 | result = wlp_enroll_neighbor(wlp, neighbor, wss, | ||
232 | wssid); | ||
233 | break; | 216 | break; |
234 | } | 217 | } |
235 | } | 218 | } |
@@ -237,8 +220,6 @@ int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
237 | dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", | 220 | dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", |
238 | dest->data[1], dest->data[0]); | 221 | dest->data[1], dest->data[0]); |
239 | mutex_unlock(&wlp->nbmutex); | 222 | mutex_unlock(&wlp->nbmutex); |
240 | d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n", | ||
241 | wss, buf, dest->data[1], dest->data[0], result); | ||
242 | return result; | 223 | return result; |
243 | } | 224 | } |
244 | 225 | ||
@@ -260,16 +241,11 @@ int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid) | |||
260 | char buf[WLP_WSS_UUID_STRSIZE]; | 241 | char buf[WLP_WSS_UUID_STRSIZE]; |
261 | int result = -ENXIO; | 242 | int result = -ENXIO; |
262 | 243 | ||
263 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 244 | |
264 | d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf); | ||
265 | mutex_lock(&wlp->nbmutex); | 245 | mutex_lock(&wlp->nbmutex); |
266 | list_for_each_entry(neighbor, &wlp->neighbors, node) { | 246 | list_for_each_entry(neighbor, &wlp->neighbors, node) { |
267 | list_for_each_entry(wssid_e, &neighbor->wssid, node) { | 247 | list_for_each_entry(wssid_e, &neighbor->wssid, node) { |
268 | if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { | 248 | if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { |
269 | d_printf(5, dev, "Found WSSID %s in neighbor " | ||
270 | "%02x:%02x cache. \n", buf, | ||
271 | neighbor->uwb_dev->dev_addr.data[1], | ||
272 | neighbor->uwb_dev->dev_addr.data[0]); | ||
273 | result = wlp_enroll_neighbor(wlp, neighbor, | 249 | result = wlp_enroll_neighbor(wlp, neighbor, |
274 | wss, wssid); | 250 | wss, wssid); |
275 | if (result == 0) /* enrollment success */ | 251 | if (result == 0) /* enrollment success */ |
@@ -279,10 +255,11 @@ int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid) | |||
279 | } | 255 | } |
280 | } | 256 | } |
281 | out: | 257 | out: |
282 | if (result == -ENXIO) | 258 | if (result == -ENXIO) { |
259 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
283 | dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf); | 260 | dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf); |
261 | } | ||
284 | mutex_unlock(&wlp->nbmutex); | 262 | mutex_unlock(&wlp->nbmutex); |
285 | d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result); | ||
286 | return result; | 263 | return result; |
287 | } | 264 | } |
288 | 265 | ||
@@ -307,27 +284,22 @@ int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
307 | struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; | 284 | struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; |
308 | 285 | ||
309 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 286 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); |
287 | |||
310 | if (wss->state != WLP_WSS_STATE_NONE) { | 288 | if (wss->state != WLP_WSS_STATE_NONE) { |
311 | dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); | 289 | dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); |
312 | result = -EEXIST; | 290 | result = -EEXIST; |
313 | goto error; | 291 | goto error; |
314 | } | 292 | } |
315 | if (!memcmp(&bcast, devaddr, sizeof(bcast))) { | 293 | if (!memcmp(&bcast, devaddr, sizeof(bcast))) |
316 | d_printf(5, dev, "Request to enroll in discovered WSS " | ||
317 | "with WSSID %s \n", buf); | ||
318 | result = wlp_wss_enroll_discovered(wss, wssid); | 294 | result = wlp_wss_enroll_discovered(wss, wssid); |
319 | } else { | 295 | else |
320 | d_printf(5, dev, "Request to enroll in WSSID %s with " | ||
321 | "registrar %02x:%02x\n", buf, devaddr->data[1], | ||
322 | devaddr->data[0]); | ||
323 | result = wlp_wss_enroll_target(wss, wssid, devaddr); | 296 | result = wlp_wss_enroll_target(wss, wssid, devaddr); |
324 | } | ||
325 | if (result < 0) { | 297 | if (result < 0) { |
326 | dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", | 298 | dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", |
327 | buf, result); | 299 | buf, result); |
328 | goto error; | 300 | goto error; |
329 | } | 301 | } |
330 | d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf); | 302 | dev_dbg(dev, "Successfully enrolled into WSS %s \n", buf); |
331 | result = wlp_wss_sysfs_add(wss, buf); | 303 | result = wlp_wss_sysfs_add(wss, buf); |
332 | if (result < 0) { | 304 | if (result < 0) { |
333 | dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); | 305 | dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); |
@@ -363,7 +335,6 @@ int wlp_wss_activate(struct wlp_wss *wss) | |||
363 | u8 hash; /* only include one hash */ | 335 | u8 hash; /* only include one hash */ |
364 | } ie_data; | 336 | } ie_data; |
365 | 337 | ||
366 | d_fnstart(5, dev, "Activating WSS %p. \n", wss); | ||
367 | BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); | 338 | BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); |
368 | wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); | 339 | wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); |
369 | wss->tag = wss->hash; | 340 | wss->tag = wss->hash; |
@@ -382,7 +353,6 @@ int wlp_wss_activate(struct wlp_wss *wss) | |||
382 | wss->state = WLP_WSS_STATE_ACTIVE; | 353 | wss->state = WLP_WSS_STATE_ACTIVE; |
383 | result = 0; | 354 | result = 0; |
384 | error_wlp_ie: | 355 | error_wlp_ie: |
385 | d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result); | ||
386 | return result; | 356 | return result; |
387 | } | 357 | } |
388 | 358 | ||
@@ -405,7 +375,6 @@ int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
405 | int result = 0; | 375 | int result = 0; |
406 | char buf[WLP_WSS_UUID_STRSIZE]; | 376 | char buf[WLP_WSS_UUID_STRSIZE]; |
407 | 377 | ||
408 | d_fnstart(5, dev, "Enrollment and activation requested. \n"); | ||
409 | mutex_lock(&wss->mutex); | 378 | mutex_lock(&wss->mutex); |
410 | result = wlp_wss_enroll(wss, wssid, devaddr); | 379 | result = wlp_wss_enroll(wss, wssid, devaddr); |
411 | if (result < 0) { | 380 | if (result < 0) { |
@@ -424,7 +393,6 @@ int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
424 | error_activate: | 393 | error_activate: |
425 | error_enroll: | 394 | error_enroll: |
426 | mutex_unlock(&wss->mutex); | 395 | mutex_unlock(&wss->mutex); |
427 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
428 | return result; | 396 | return result; |
429 | } | 397 | } |
430 | 398 | ||
@@ -447,11 +415,9 @@ int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
447 | struct device *dev = &wlp->rc->uwb_dev.dev; | 415 | struct device *dev = &wlp->rc->uwb_dev.dev; |
448 | int result = 0; | 416 | int result = 0; |
449 | char buf[WLP_WSS_UUID_STRSIZE]; | 417 | char buf[WLP_WSS_UUID_STRSIZE]; |
450 | d_fnstart(5, dev, "Request to create new WSS.\n"); | 418 | |
451 | result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 419 | result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); |
452 | d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, " | 420 | |
453 | "sec_status=%u, accepting enrollment=%u \n", | ||
454 | buf, name, sec_status, accept); | ||
455 | if (!mutex_trylock(&wss->mutex)) { | 421 | if (!mutex_trylock(&wss->mutex)) { |
456 | dev_err(dev, "WLP: WLP association session in progress.\n"); | 422 | dev_err(dev, "WLP: WLP association session in progress.\n"); |
457 | return -EBUSY; | 423 | return -EBUSY; |
@@ -498,7 +464,6 @@ int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
498 | result = 0; | 464 | result = 0; |
499 | out: | 465 | out: |
500 | mutex_unlock(&wss->mutex); | 466 | mutex_unlock(&wss->mutex); |
501 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
502 | return result; | 467 | return result; |
503 | } | 468 | } |
504 | 469 | ||
@@ -520,16 +485,12 @@ int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, | |||
520 | { | 485 | { |
521 | int result = 0; | 486 | int result = 0; |
522 | struct device *dev = &wlp->rc->uwb_dev.dev; | 487 | struct device *dev = &wlp->rc->uwb_dev.dev; |
523 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
524 | DECLARE_COMPLETION_ONSTACK(completion); | 488 | DECLARE_COMPLETION_ONSTACK(completion); |
525 | struct wlp_session session; | 489 | struct wlp_session session; |
526 | struct sk_buff *skb; | 490 | struct sk_buff *skb; |
527 | struct wlp_frame_assoc *resp; | 491 | struct wlp_frame_assoc *resp; |
528 | struct wlp_uuid wssid; | 492 | struct wlp_uuid wssid; |
529 | 493 | ||
530 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
531 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
532 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
533 | mutex_lock(&wlp->mutex); | 494 | mutex_lock(&wlp->mutex); |
534 | /* Send C1 association frame */ | 495 | /* Send C1 association frame */ |
535 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); | 496 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); |
@@ -565,8 +526,6 @@ int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, | |||
565 | /* Parse message in session->data: it will be either C2 or F0 */ | 526 | /* Parse message in session->data: it will be either C2 or F0 */ |
566 | skb = session.data; | 527 | skb = session.data; |
567 | resp = (void *) skb->data; | 528 | resp = (void *) skb->data; |
568 | d_printf(5, dev, "Received response to C1 frame. \n"); | ||
569 | d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
570 | if (resp->type == WLP_ASSOC_F0) { | 529 | if (resp->type == WLP_ASSOC_F0) { |
571 | result = wlp_parse_f0(wlp, skb); | 530 | result = wlp_parse_f0(wlp, skb); |
572 | if (result < 0) | 531 | if (result < 0) |
@@ -584,11 +543,9 @@ int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, | |||
584 | result = 0; | 543 | result = 0; |
585 | goto error_resp_parse; | 544 | goto error_resp_parse; |
586 | } | 545 | } |
587 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) { | 546 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) |
588 | d_printf(5, dev, "WSSID in C2 frame matches local " | ||
589 | "active WSS.\n"); | ||
590 | result = 1; | 547 | result = 1; |
591 | } else { | 548 | else { |
592 | dev_err(dev, "WLP: Received a C2 frame without matching " | 549 | dev_err(dev, "WLP: Received a C2 frame without matching " |
593 | "WSSID.\n"); | 550 | "WSSID.\n"); |
594 | result = 0; | 551 | result = 0; |
@@ -598,8 +555,6 @@ error_resp_parse: | |||
598 | out: | 555 | out: |
599 | wlp->session = NULL; | 556 | wlp->session = NULL; |
600 | mutex_unlock(&wlp->mutex); | 557 | mutex_unlock(&wlp->mutex); |
601 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
602 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
603 | return result; | 558 | return result; |
604 | } | 559 | } |
605 | 560 | ||
@@ -620,16 +575,8 @@ int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss, | |||
620 | { | 575 | { |
621 | struct device *dev = &wlp->rc->uwb_dev.dev; | 576 | struct device *dev = &wlp->rc->uwb_dev.dev; |
622 | int result = 0; | 577 | int result = 0; |
623 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
624 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
625 | d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " | ||
626 | "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag, | ||
627 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
628 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); | ||
629 | 578 | ||
630 | if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { | 579 | if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { |
631 | d_printf(5, dev, "WSSID from neighbor frame matches local " | ||
632 | "active WSS.\n"); | ||
633 | /* Update EDA cache */ | 580 | /* Update EDA cache */ |
634 | result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, | 581 | result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, |
635 | (void *) virt_addr->data, *tag, | 582 | (void *) virt_addr->data, *tag, |
@@ -638,18 +585,9 @@ int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss, | |||
638 | dev_err(dev, "WLP: Unable to update EDA cache " | 585 | dev_err(dev, "WLP: Unable to update EDA cache " |
639 | "with new connected neighbor information.\n"); | 586 | "with new connected neighbor information.\n"); |
640 | } else { | 587 | } else { |
641 | dev_err(dev, "WLP: Neighbor does not have matching " | 588 | dev_err(dev, "WLP: Neighbor does not have matching WSSID.\n"); |
642 | "WSSID.\n"); | ||
643 | result = -EINVAL; | 589 | result = -EINVAL; |
644 | } | 590 | } |
645 | |||
646 | d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " | ||
647 | "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n", | ||
648 | wlp, wss, buf, *tag, | ||
649 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
650 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5], | ||
651 | result); | ||
652 | |||
653 | return result; | 591 | return result; |
654 | } | 592 | } |
655 | 593 | ||
@@ -665,7 +603,6 @@ int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, | |||
665 | { | 603 | { |
666 | int result; | 604 | int result; |
667 | struct device *dev = &wlp->rc->uwb_dev.dev; | 605 | struct device *dev = &wlp->rc->uwb_dev.dev; |
668 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
669 | struct wlp_uuid wssid; | 606 | struct wlp_uuid wssid; |
670 | u8 tag; | 607 | u8 tag; |
671 | struct uwb_mac_addr virt_addr; | 608 | struct uwb_mac_addr virt_addr; |
@@ -674,9 +611,6 @@ int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, | |||
674 | struct wlp_frame_assoc *resp; | 611 | struct wlp_frame_assoc *resp; |
675 | struct sk_buff *skb; | 612 | struct sk_buff *skb; |
676 | 613 | ||
677 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
678 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
679 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
680 | mutex_lock(&wlp->mutex); | 614 | mutex_lock(&wlp->mutex); |
681 | /* Send C3 association frame */ | 615 | /* Send C3 association frame */ |
682 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); | 616 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); |
@@ -711,8 +645,6 @@ int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, | |||
711 | /* Parse message in session->data: it will be either C4 or F0 */ | 645 | /* Parse message in session->data: it will be either C4 or F0 */ |
712 | skb = session.data; | 646 | skb = session.data; |
713 | resp = (void *) skb->data; | 647 | resp = (void *) skb->data; |
714 | d_printf(5, dev, "Received response to C3 frame. \n"); | ||
715 | d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
716 | if (resp->type == WLP_ASSOC_F0) { | 648 | if (resp->type == WLP_ASSOC_F0) { |
717 | result = wlp_parse_f0(wlp, skb); | 649 | result = wlp_parse_f0(wlp, skb); |
718 | if (result < 0) | 650 | if (result < 0) |
@@ -744,8 +676,6 @@ out: | |||
744 | WLP_WSS_CONNECT_FAILED); | 676 | WLP_WSS_CONNECT_FAILED); |
745 | wlp->session = NULL; | 677 | wlp->session = NULL; |
746 | mutex_unlock(&wlp->mutex); | 678 | mutex_unlock(&wlp->mutex); |
747 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
748 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
749 | return result; | 679 | return result; |
750 | } | 680 | } |
751 | 681 | ||
@@ -780,12 +710,8 @@ void wlp_wss_connect_send(struct work_struct *ws) | |||
780 | struct wlp_wss *wss = &wlp->wss; | 710 | struct wlp_wss *wss = &wlp->wss; |
781 | int result; | 711 | int result; |
782 | struct device *dev = &wlp->rc->uwb_dev.dev; | 712 | struct device *dev = &wlp->rc->uwb_dev.dev; |
783 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
784 | 713 | ||
785 | mutex_lock(&wss->mutex); | 714 | mutex_lock(&wss->mutex); |
786 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
787 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
788 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
789 | if (wss->state < WLP_WSS_STATE_ACTIVE) { | 715 | if (wss->state < WLP_WSS_STATE_ACTIVE) { |
790 | if (printk_ratelimit()) | 716 | if (printk_ratelimit()) |
791 | dev_err(dev, "WLP: Attempting to connect with " | 717 | dev_err(dev, "WLP: Attempting to connect with " |
@@ -836,7 +762,6 @@ out: | |||
836 | BUG_ON(wlp->start_queue == NULL); | 762 | BUG_ON(wlp->start_queue == NULL); |
837 | wlp->start_queue(wlp); | 763 | wlp->start_queue(wlp); |
838 | mutex_unlock(&wss->mutex); | 764 | mutex_unlock(&wss->mutex); |
839 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf); | ||
840 | } | 765 | } |
841 | 766 | ||
842 | /** | 767 | /** |
@@ -855,7 +780,6 @@ int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
855 | struct sk_buff *skb = _skb; | 780 | struct sk_buff *skb = _skb; |
856 | struct wlp_frame_std_abbrv_hdr *std_hdr; | 781 | struct wlp_frame_std_abbrv_hdr *std_hdr; |
857 | 782 | ||
858 | d_fnstart(6, dev, "wlp %p \n", wlp); | ||
859 | if (eda_entry->state == WLP_WSS_CONNECTED) { | 783 | if (eda_entry->state == WLP_WSS_CONNECTED) { |
860 | /* Add WLP header */ | 784 | /* Add WLP header */ |
861 | BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); | 785 | BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); |
@@ -873,7 +797,6 @@ int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
873 | dev_addr->data[0]); | 797 | dev_addr->data[0]); |
874 | result = -EINVAL; | 798 | result = -EINVAL; |
875 | } | 799 | } |
876 | d_fnend(6, dev, "wlp %p \n", wlp); | ||
877 | return result; | 800 | return result; |
878 | } | 801 | } |
879 | 802 | ||
@@ -893,16 +816,9 @@ int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
893 | { | 816 | { |
894 | int result = 0; | 817 | int result = 0; |
895 | struct device *dev = &wlp->rc->uwb_dev.dev; | 818 | struct device *dev = &wlp->rc->uwb_dev.dev; |
896 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | ||
897 | unsigned char *eth_addr = eda_entry->eth_addr; | ||
898 | struct sk_buff *skb = _skb; | 819 | struct sk_buff *skb = _skb; |
899 | struct wlp_assoc_conn_ctx *conn_ctx; | 820 | struct wlp_assoc_conn_ctx *conn_ctx; |
900 | 821 | ||
901 | d_fnstart(5, dev, "wlp %p\n", wlp); | ||
902 | d_printf(5, dev, "To neighbor %02x:%02x with eth " | ||
903 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1], | ||
904 | dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2], | ||
905 | eth_addr[3], eth_addr[4], eth_addr[5]); | ||
906 | if (eda_entry->state == WLP_WSS_UNCONNECTED) { | 822 | if (eda_entry->state == WLP_WSS_UNCONNECTED) { |
907 | /* We don't want any more packets while we set up connection */ | 823 | /* We don't want any more packets while we set up connection */ |
908 | BUG_ON(wlp->stop_queue == NULL); | 824 | BUG_ON(wlp->stop_queue == NULL); |
@@ -929,12 +845,9 @@ int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
929 | "previously. Not retrying. \n"); | 845 | "previously. Not retrying. \n"); |
930 | result = -ENONET; | 846 | result = -ENONET; |
931 | goto out; | 847 | goto out; |
932 | } else { /* eda_entry->state == WLP_WSS_CONNECTED */ | 848 | } else /* eda_entry->state == WLP_WSS_CONNECTED */ |
933 | d_printf(5, dev, "Neighbor is connected, preparing frame.\n"); | ||
934 | result = wlp_wss_prep_hdr(wlp, eda_entry, skb); | 849 | result = wlp_wss_prep_hdr(wlp, eda_entry, skb); |
935 | } | ||
936 | out: | 850 | out: |
937 | d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result); | ||
938 | return result; | 851 | return result; |
939 | } | 852 | } |
940 | 853 | ||
@@ -957,8 +870,6 @@ int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
957 | struct sk_buff *copy; | 870 | struct sk_buff *copy; |
958 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | 871 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; |
959 | 872 | ||
960 | d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n", | ||
961 | dev_addr->data[1], dev_addr->data[0], skb); | ||
962 | copy = skb_copy(skb, GFP_ATOMIC); | 873 | copy = skb_copy(skb, GFP_ATOMIC); |
963 | if (copy == NULL) { | 874 | if (copy == NULL) { |
964 | if (printk_ratelimit()) | 875 | if (printk_ratelimit()) |
@@ -988,8 +899,6 @@ int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
988 | dev_kfree_skb_irq(copy);/*we need to free if tx fails */ | 899 | dev_kfree_skb_irq(copy);/*we need to free if tx fails */ |
989 | } | 900 | } |
990 | out: | 901 | out: |
991 | d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1], | ||
992 | dev_addr->data[0]); | ||
993 | return result; | 902 | return result; |
994 | } | 903 | } |
995 | 904 | ||
@@ -1005,7 +914,7 @@ int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss) | |||
1005 | struct wlp *wlp = container_of(wss, struct wlp, wss); | 914 | struct wlp *wlp = container_of(wss, struct wlp, wss); |
1006 | struct device *dev = &wlp->rc->uwb_dev.dev; | 915 | struct device *dev = &wlp->rc->uwb_dev.dev; |
1007 | int result = 0; | 916 | int result = 0; |
1008 | d_fnstart(5, dev, "wss (%p) \n", wss); | 917 | |
1009 | mutex_lock(&wss->mutex); | 918 | mutex_lock(&wss->mutex); |
1010 | wss->kobj.parent = &net_dev->dev.kobj; | 919 | wss->kobj.parent = &net_dev->dev.kobj; |
1011 | if (!is_valid_ether_addr(net_dev->dev_addr)) { | 920 | if (!is_valid_ether_addr(net_dev->dev_addr)) { |
@@ -1018,7 +927,6 @@ int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss) | |||
1018 | sizeof(wss->virtual_addr.data)); | 927 | sizeof(wss->virtual_addr.data)); |
1019 | out: | 928 | out: |
1020 | mutex_unlock(&wss->mutex); | 929 | mutex_unlock(&wss->mutex); |
1021 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
1022 | return result; | 930 | return result; |
1023 | } | 931 | } |
1024 | EXPORT_SYMBOL_GPL(wlp_wss_setup); | 932 | EXPORT_SYMBOL_GPL(wlp_wss_setup); |
@@ -1035,8 +943,7 @@ EXPORT_SYMBOL_GPL(wlp_wss_setup); | |||
1035 | void wlp_wss_remove(struct wlp_wss *wss) | 943 | void wlp_wss_remove(struct wlp_wss *wss) |
1036 | { | 944 | { |
1037 | struct wlp *wlp = container_of(wss, struct wlp, wss); | 945 | struct wlp *wlp = container_of(wss, struct wlp, wss); |
1038 | struct device *dev = &wlp->rc->uwb_dev.dev; | 946 | |
1039 | d_fnstart(5, dev, "wss (%p) \n", wss); | ||
1040 | mutex_lock(&wss->mutex); | 947 | mutex_lock(&wss->mutex); |
1041 | if (wss->state == WLP_WSS_STATE_ACTIVE) | 948 | if (wss->state == WLP_WSS_STATE_ACTIVE) |
1042 | uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); | 949 | uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); |
@@ -1050,6 +957,5 @@ void wlp_wss_remove(struct wlp_wss *wss) | |||
1050 | wlp_eda_release(&wlp->eda); | 957 | wlp_eda_release(&wlp->eda); |
1051 | wlp_eda_init(&wlp->eda); | 958 | wlp_eda_init(&wlp->eda); |
1052 | mutex_unlock(&wss->mutex); | 959 | mutex_unlock(&wss->mutex); |
1053 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
1054 | } | 960 | } |
1055 | EXPORT_SYMBOL_GPL(wlp_wss_remove); | 961 | EXPORT_SYMBOL_GPL(wlp_wss_remove); |
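Taken together, the wss-lc.c hunks follow the same pattern as the rest of the patch: the driver-private d_fnstart()/d_fnend()/d_printf() tracing from <linux/uwb/debug.h>, gated per file by D_LOCAL, is dropped, and the few messages worth keeping become plain dev_dbg()/dev_err() calls. The statement from the wlp_wss_enroll() hunk, before and after, illustrates the conversion:

/* before: verbosity gated at compile time by the per-file D_LOCAL level */
d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf);

/* after: standard device debug output, enabled by building with -DDEBUG
 * or, on kernels that support it, through dynamic debug at run time */
dev_dbg(dev, "Successfully enrolled into WSS %s \n", buf);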