Diffstat (limited to 'drivers/uwb/drp.c')
-rw-r--r-- | drivers/uwb/drp.c | 695 |
1 file changed, 524 insertions(+), 171 deletions(-)
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index c0b1e5e2bd08..2b4f9406789d 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -23,6 +23,59 @@ | |||
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include "uwb-internal.h" | 24 | #include "uwb-internal.h" |
25 | 25 | ||
26 | |||
27 | /* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ | ||
28 | enum uwb_drp_conflict_action { | ||
29 | /* Reservation is maintained, no action needed */ | ||
30 | UWB_DRP_CONFLICT_MANTAIN = 0, | ||
31 | |||
32 | /* the device shall not transmit frames in conflicting MASs in | ||
33 | * the following superframe. If the device is the reservation | ||
34 | * target, it shall also set the Reason Code in its DRP IE to | ||
35 | * Conflict in its beacon in the following superframe. | ||
36 | */ | ||
37 | UWB_DRP_CONFLICT_ACT1, | ||
38 | |||
39 | /* the device shall not set the Reservation Status bit to ONE | ||
40 | * and shall not transmit frames in conflicting MASs. If the | ||
41 | * device is the reservation target, it shall also set the | ||
42 | * Reason Code in its DRP IE to Conflict. | ||
43 | */ | ||
44 | UWB_DRP_CONFLICT_ACT2, | ||
45 | |||
46 | /* the device shall not transmit frames in conflicting MASs in | ||
47 | * the following superframe. It shall remove the conflicting | ||
48 | * MASs from the reservation or set the Reservation Status to | ||
49 | * ZERO in its beacon in the following superframe. If the | ||
50 | * device is the reservation target, it shall also set the | ||
51 | * Reason Code in its DRP IE to Conflict. | ||
52 | */ | ||
53 | UWB_DRP_CONFLICT_ACT3, | ||
54 | }; | ||
55 | |||
56 | |||
57 | static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, | ||
58 | struct uwb_rceb *reply, ssize_t reply_size) | ||
59 | { | ||
60 | struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; | ||
61 | |||
62 | if (r != NULL) { | ||
63 | if (r->bResultCode != UWB_RC_RES_SUCCESS) | ||
64 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", | ||
65 | uwb_rc_strerror(r->bResultCode), r->bResultCode); | ||
66 | } else | ||
67 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); | ||
68 | |||
69 | spin_lock(&rc->rsvs_lock); | ||
70 | if (rc->set_drp_ie_pending > 1) { | ||
71 | rc->set_drp_ie_pending = 0; | ||
72 | uwb_rsv_queue_update(rc); | ||
73 | } else { | ||
74 | rc->set_drp_ie_pending = 0; | ||
75 | } | ||
76 | spin_unlock(&rc->rsvs_lock); | ||
77 | } | ||
78 | |||
26 | /** | 79 | /** |
27 | * Construct and send the SET DRP IE | 80 | * Construct and send the SET DRP IE |
28 | * | 81 | * |
@@ -37,28 +90,32 @@ | |||
37 | * | 90 | * |
38 | * A DRP Availability IE is appended. | 91 | * A DRP Availability IE is appended. |
39 | * | 92 | * |
40 | * rc->uwb_dev.mutex is held | 93 | * rc->rsvs_mutex is held |
41 | * | 94 | * |
42 | * FIXME We currently ignore the returned value indicating the remaining space | 95 | * FIXME We currently ignore the returned value indicating the remaining space |
43 | * in beacon. This could be used to deny reservation requests earlier if | 96 | * in beacon. This could be used to deny reservation requests earlier if |
44 | * determined that they would cause the beacon space to be exceeded. | 97 | * determined that they would cause the beacon space to be exceeded. |
45 | */ | 98 | */ |
46 | static | 99 | int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) |
47 | int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) | ||
48 | { | 100 | { |
49 | int result; | 101 | int result; |
50 | struct device *dev = &rc->uwb_dev.dev; | ||
51 | struct uwb_rc_cmd_set_drp_ie *cmd; | 102 | struct uwb_rc_cmd_set_drp_ie *cmd; |
52 | struct uwb_rc_evt_set_drp_ie reply; | ||
53 | struct uwb_rsv *rsv; | 103 | struct uwb_rsv *rsv; |
104 | struct uwb_rsv_move *mv; | ||
54 | int num_bytes = 0; | 105 | int num_bytes = 0; |
55 | u8 *IEDataptr; | 106 | u8 *IEDataptr; |
56 | 107 | ||
57 | result = -ENOMEM; | 108 | result = -ENOMEM; |
58 | /* First traverse all reservations to determine memory needed. */ | 109 | /* First traverse all reservations to determine memory needed. */ |
59 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | 110 | list_for_each_entry(rsv, &rc->reservations, rc_node) { |
60 | if (rsv->drp_ie != NULL) | 111 | if (rsv->drp_ie != NULL) { |
61 | num_bytes += rsv->drp_ie->hdr.length + 2; | 112 | num_bytes += rsv->drp_ie->hdr.length + 2; |
113 | if (uwb_rsv_has_two_drp_ies(rsv) && | ||
114 | (rsv->mv.companion_drp_ie != NULL)) { | ||
115 | mv = &rsv->mv; | ||
116 | num_bytes += mv->companion_drp_ie->hdr.length + 2; | ||
117 | } | ||
118 | } | ||
62 | } | 119 | } |
63 | num_bytes += sizeof(rc->drp_avail.ie); | 120 | num_bytes += sizeof(rc->drp_avail.ie); |
64 | cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); | 121 | cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); |
@@ -69,128 +126,322 @@ int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) | |||
69 | cmd->wIELength = num_bytes; | 126 | cmd->wIELength = num_bytes; |
70 | IEDataptr = (u8 *)&cmd->IEData[0]; | 127 | IEDataptr = (u8 *)&cmd->IEData[0]; |
71 | 128 | ||
129 | /* FIXME: DRP avail IE is not always needed */ | ||
130 | /* put DRP avail IE first */ | ||
131 | memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); | ||
132 | IEDataptr += sizeof(struct uwb_ie_drp_avail); | ||
133 | |||
72 | /* Next traverse all reservations to place IEs in allocated memory. */ | 134 | /* Next traverse all reservations to place IEs in allocated memory. */ |
73 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | 135 | list_for_each_entry(rsv, &rc->reservations, rc_node) { |
74 | if (rsv->drp_ie != NULL) { | 136 | if (rsv->drp_ie != NULL) { |
75 | memcpy(IEDataptr, rsv->drp_ie, | 137 | memcpy(IEDataptr, rsv->drp_ie, |
76 | rsv->drp_ie->hdr.length + 2); | 138 | rsv->drp_ie->hdr.length + 2); |
77 | IEDataptr += rsv->drp_ie->hdr.length + 2; | 139 | IEDataptr += rsv->drp_ie->hdr.length + 2; |
140 | |||
141 | if (uwb_rsv_has_two_drp_ies(rsv) && | ||
142 | (rsv->mv.companion_drp_ie != NULL)) { | ||
143 | mv = &rsv->mv; | ||
144 | memcpy(IEDataptr, mv->companion_drp_ie, | ||
145 | mv->companion_drp_ie->hdr.length + 2); | ||
146 | IEDataptr += mv->companion_drp_ie->hdr.length + 2; | ||
147 | } | ||
78 | } | 148 | } |
79 | } | 149 | } |
80 | memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); | ||
81 | 150 | ||
82 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | 151 | result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes, |
83 | reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; | 152 | UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, |
84 | result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, | 153 | uwb_rc_set_drp_cmd_done, NULL); |
85 | sizeof(*cmd) + num_bytes, &reply.rceb, | 154 | |
86 | sizeof(reply)); | 155 | rc->set_drp_ie_pending = 1; |
87 | if (result < 0) | 156 | |
88 | goto error_cmd; | ||
89 | result = le16_to_cpu(reply.wRemainingSpace); | ||
90 | if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
91 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " | ||
92 | "failed: %s (%d). RemainingSpace in beacon " | ||
93 | "= %d\n", uwb_rc_strerror(reply.bResultCode), | ||
94 | reply.bResultCode, result); | ||
95 | result = -EIO; | ||
96 | } else { | ||
97 | dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " | ||
98 | "= %d.\n", result); | ||
99 | result = 0; | ||
100 | } | ||
101 | error_cmd: | ||
102 | kfree(cmd); | 157 | kfree(cmd); |
103 | error: | 158 | error: |
104 | return result; | 159 | return result; |
105 | |||
106 | } | 160 | } |
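uwb_rc_send_all_drp_ie() builds the SET-DRP-IE command in two passes: a first walk over rc->reservations to size the buffer (each IE takes hdr.length + 2 octets, its payload plus the two-octet IE header, and a moved reservation contributes its companion DRP IE as well), then a second walk to copy the IEs after the DRP Availability IE. A standalone sketch of that two-pass packing pattern (illustration only, not from this patch; struct ie and pack_ies() are invented names):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct ie {                        /* invented stand-in for a generic IE       */
            uint8_t element_id;
            uint8_t length;            /* payload length, excluding this header    */
            uint8_t data[];
    };

    /* Pack a NULL-terminated array of IEs into one buffer: size first, then
     * copy "length + 2" octets per IE, exactly as the loops above do. */
    static uint8_t *pack_ies(struct ie *const *ies, size_t *out_len)
    {
            size_t total = 0, off = 0, i;
            uint8_t *buf;

            for (i = 0; ies[i]; i++)
                    total += ies[i]->length + 2;

            buf = malloc(total);
            if (!buf)
                    return NULL;

            for (i = 0; ies[i]; i++) {
                    memcpy(buf + off, ies[i], ies[i]->length + 2);
                    off += ies[i]->length + 2;
            }
            *out_len = total;
            return buf;
    }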
107 | /** | 161 | |
108 | * Send all DRP IEs associated with this host | 162 | /* |
109 | * | 163 | * Evaluate the action to perform using conflict resolution rules |
110 | * @returns: >= 0 number of bytes still available in the beacon | ||
111 | * < 0 errno code on error. | ||
112 | * | 164 | * |
113 | * As per the protocol we obtain the host controller device lock to access | 165 | * Return a uwb_drp_conflict_action. |
114 | * bandwidth structures. | ||
115 | */ | 166 | */ |
116 | int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) | 167 | static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, |
168 | struct uwb_rsv *rsv, int our_status) | ||
117 | { | 169 | { |
118 | int result; | 170 | int our_tie_breaker = rsv->tiebreaker; |
171 | int our_type = rsv->type; | ||
172 | int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; | ||
173 | |||
174 | int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); | ||
175 | int ext_status = uwb_ie_drp_status(ext_drp_ie); | ||
176 | int ext_type = uwb_ie_drp_type(ext_drp_ie); | ||
177 | |||
178 | |||
179 | /* [ECMA-368 2nd Edition] 17.4.6 */ | ||
180 | if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { | ||
181 | return UWB_DRP_CONFLICT_MANTAIN; | ||
182 | } | ||
119 | 183 | ||
120 | mutex_lock(&rc->uwb_dev.mutex); | 184 | /* [ECMA-368 2nd Edition] 17.4.6-1 */ |
121 | result = uwb_rc_gen_send_drp_ie(rc); | 185 | if (our_type == UWB_DRP_TYPE_ALIEN_BP) { |
122 | mutex_unlock(&rc->uwb_dev.mutex); | 186 | return UWB_DRP_CONFLICT_MANTAIN; |
123 | return result; | 187 | } |
188 | |||
189 | /* [ECMA-368 2nd Edition] 17.4.6-2 */ | ||
190 | if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { | ||
191 | /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ | ||
192 | return UWB_DRP_CONFLICT_ACT1; | ||
193 | } | ||
194 | |||
195 | /* [ECMA-368 2nd Edition] 17.4.6-3 */ | ||
196 | if (our_status == 0 && ext_status == 1) { | ||
197 | return UWB_DRP_CONFLICT_ACT2; | ||
198 | } | ||
199 | |||
200 | /* [ECMA-368 2nd Edition] 17.4.6-4 */ | ||
201 | if (our_status == 1 && ext_status == 0) { | ||
202 | return UWB_DRP_CONFLICT_MANTAIN; | ||
203 | } | ||
204 | |||
205 | /* [ECMA-368 2nd Edition] 17.4.6-5a */ | ||
206 | if (our_tie_breaker == ext_tie_breaker && | ||
207 | our_beacon_slot < ext_beacon_slot) { | ||
208 | return UWB_DRP_CONFLICT_MANTAIN; | ||
209 | } | ||
210 | |||
211 | /* [ECMA-368 2nd Edition] 17.4.6-5b */ | ||
212 | if (our_tie_breaker != ext_tie_breaker && | ||
213 | our_beacon_slot > ext_beacon_slot) { | ||
214 | return UWB_DRP_CONFLICT_MANTAIN; | ||
215 | } | ||
216 | |||
217 | if (our_status == 0) { | ||
218 | if (our_tie_breaker == ext_tie_breaker) { | ||
219 | /* [ECMA-368 2nd Edition] 17.4.6-6a */ | ||
220 | if (our_beacon_slot > ext_beacon_slot) { | ||
221 | return UWB_DRP_CONFLICT_ACT2; | ||
222 | } | ||
223 | } else { | ||
224 | /* [ECMA-368 2nd Edition] 17.4.6-6b */ | ||
225 | if (our_beacon_slot < ext_beacon_slot) { | ||
226 | return UWB_DRP_CONFLICT_ACT2; | ||
227 | } | ||
228 | } | ||
229 | } else { | ||
230 | if (our_tie_breaker == ext_tie_breaker) { | ||
231 | /* [ECMA-368 2nd Edition] 17.4.6-7a */ | ||
232 | if (our_beacon_slot > ext_beacon_slot) { | ||
233 | return UWB_DRP_CONFLICT_ACT3; | ||
234 | } | ||
235 | } else { | ||
236 | /* [ECMA-368 2nd Edition] 17.4.6-7b */ | ||
237 | if (our_beacon_slot < ext_beacon_slot) { | ||
238 | return UWB_DRP_CONFLICT_ACT3; | ||
239 | } | ||
240 | } | ||
241 | } | ||
242 | return UWB_DRP_CONFLICT_MANTAIN; | ||
124 | } | 243 | } |
125 | 244 | ||
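For reference, the ladder above can be restated rule by rule. The standalone sketch below (illustration only, not part of the driver) follows the same [ECMA-368 2nd Edition] 17.4.6 ordering, with plain boolean parameters in place of the uwb_ie_drp_*() accessors:

    enum action { MAINTAIN, ACT1, ACT2, ACT3 };  /* mirrors enum uwb_drp_conflict_action */

    static enum action resolve(int both_pca, int we_are_alien_bp, int ext_is_alien_bp,
                               int our_status, int ext_status,
                               int our_tb, int ext_tb, int our_slot, int ext_slot)
    {
            if (both_pca)                    return MAINTAIN;  /* PCA vs PCA           */
            if (we_are_alien_bp)             return MAINTAIN;  /* 17.4.6-1             */
            if (ext_is_alien_bp)             return ACT1;      /* 17.4.6-2             */
            if (!our_status && ext_status)   return ACT2;      /* 17.4.6-3             */
            if (our_status && !ext_status)   return MAINTAIN;  /* 17.4.6-4             */
            if (our_tb == ext_tb ? our_slot < ext_slot : our_slot > ext_slot)
                    return MAINTAIN;                           /* 17.4.6-5a/5b         */
            if (our_tb == ext_tb ? our_slot > ext_slot : our_slot < ext_slot)
                    return our_status ? ACT3 : ACT2;           /* 17.4.6-6a/6b, 7a/7b  */
            return MAINTAIN;
    }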
126 | void uwb_drp_handle_timeout(struct uwb_rsv *rsv) | 245 | static void handle_conflict_normal(struct uwb_ie_drp *drp_ie, |
246 | int ext_beacon_slot, | ||
247 | struct uwb_rsv *rsv, | ||
248 | struct uwb_mas_bm *conflicting_mas) | ||
127 | { | 249 | { |
128 | struct device *dev = &rsv->rc->uwb_dev.dev; | 250 | struct uwb_rc *rc = rsv->rc; |
251 | struct uwb_rsv_move *mv = &rsv->mv; | ||
252 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
253 | int action; | ||
254 | |||
255 | action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); | ||
256 | |||
257 | if (uwb_rsv_is_owner(rsv)) { | ||
258 | switch(action) { | ||
259 | case UWB_DRP_CONFLICT_ACT2: | ||
260 | /* try move */ | ||
261 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); | ||
262 | if (bow->can_reserve_extra_mases == false) | ||
263 | uwb_rsv_backoff_win_increment(rc); | ||
264 | |||
265 | break; | ||
266 | case UWB_DRP_CONFLICT_ACT3: | ||
267 | uwb_rsv_backoff_win_increment(rc); | ||
268 | /* drop some MASs, with reason code modified */ | ||
269 | /* put the MASs to be dropped in the companion bitmap */ | ||
270 | bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); | ||
271 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
272 | default: | ||
273 | break; | ||
274 | } | ||
275 | } else { | ||
276 | switch(action) { | ||
277 | case UWB_DRP_CONFLICT_ACT2: | ||
278 | case UWB_DRP_CONFLICT_ACT3: | ||
279 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
280 | default: | ||
281 | break; | ||
282 | } | ||
129 | 283 | ||
130 | dev_dbg(dev, "reservation timeout in state %s (%d)\n", | 284 | } |
131 | uwb_rsv_state_str(rsv->state), rsv->state); | 285 | |
286 | } | ||
132 | 287 | ||
133 | switch (rsv->state) { | 288 | static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, |
134 | case UWB_RSV_STATE_O_INITIATED: | 289 | struct uwb_rsv *rsv, bool companion_only, |
135 | if (rsv->is_multicast) { | 290 | struct uwb_mas_bm *conflicting_mas) |
136 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 291 | { |
137 | return; | 292 | struct uwb_rc *rc = rsv->rc; |
293 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
294 | struct uwb_rsv_move *mv = &rsv->mv; | ||
295 | int action; | ||
296 | |||
297 | if (companion_only) { | ||
298 | /* status of companion is 0 at this point */ | ||
299 | action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); | ||
300 | if (uwb_rsv_is_owner(rsv)) { | ||
301 | switch(action) { | ||
302 | case UWB_DRP_CONFLICT_ACT2: | ||
303 | case UWB_DRP_CONFLICT_ACT3: | ||
304 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
305 | rsv->needs_release_companion_mas = false; | ||
306 | if (bow->can_reserve_extra_mases == false) | ||
307 | uwb_rsv_backoff_win_increment(rc); | ||
308 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
309 | } | ||
310 | } else { /* rsv is target */ | ||
311 | switch(action) { | ||
312 | case UWB_DRP_CONFLICT_ACT2: | ||
313 | case UWB_DRP_CONFLICT_ACT3: | ||
314 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT); | ||
315 | /* send_drp_avail_ie = true; */ | ||
316 | } | ||
138 | } | 317 | } |
139 | break; | 318 | } else { /* the base part of the reservation is also conflicting */ |
140 | case UWB_RSV_STATE_O_ESTABLISHED: | 319 | if (uwb_rsv_is_owner(rsv)) { |
141 | if (rsv->is_multicast) | 320 | uwb_rsv_backoff_win_increment(rc); |
142 | return; | 321 | /* remove companion part */ |
143 | break; | 322 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); |
144 | default: | 323 | |
145 | break; | 324 | /* drop some MASs, with reason code modified */ |
325 | |||
326 | /* put the MASs to be dropped in the companion bitmap */ | ||
327 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); | ||
328 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
329 | } else { /* it is a target rsv */ | ||
330 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
331 | /* send_drp_avail_ie = true; */ | ||
332 | } | ||
333 | } | ||
334 | } | ||
335 | |||
336 | static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, | ||
337 | struct uwb_rc_evt_drp *drp_evt, | ||
338 | struct uwb_ie_drp *drp_ie, | ||
339 | struct uwb_mas_bm *conflicting_mas) | ||
340 | { | ||
341 | struct uwb_rsv_move *mv; | ||
342 | |||
343 | /* check if the conflicting reservation has two drp_ies */ | ||
344 | if (uwb_rsv_has_two_drp_ies(rsv)) { | ||
345 | mv = &rsv->mv; | ||
346 | if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
347 | handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, | ||
348 | rsv, false, conflicting_mas); | ||
349 | } else { | ||
350 | if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
351 | handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, | ||
352 | rsv, true, conflicting_mas); | ||
353 | } | ||
354 | } | ||
355 | } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
356 | handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas); | ||
146 | } | 357 | } |
147 | uwb_rsv_remove(rsv); | ||
148 | } | 358 | } |
149 | 359 | ||
360 | static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, | ||
361 | struct uwb_rc_evt_drp *drp_evt, | ||
362 | struct uwb_ie_drp *drp_ie, | ||
363 | struct uwb_mas_bm *conflicting_mas) | ||
364 | { | ||
365 | struct uwb_rsv *rsv; | ||
366 | |||
367 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
368 | uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas); | ||
369 | } | ||
370 | } | ||
371 | |||
150 | /* | 372 | /* |
151 | * Based on the DRP IE, transition a target reservation to a new | 373 | * Based on the DRP IE, transition a target reservation to a new |
152 | * state. | 374 | * state. |
153 | */ | 375 | */ |
154 | static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, | 376 | static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, |
155 | struct uwb_ie_drp *drp_ie) | 377 | struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) |
156 | { | 378 | { |
157 | struct device *dev = &rc->uwb_dev.dev; | 379 | struct device *dev = &rc->uwb_dev.dev; |
380 | struct uwb_rsv_move *mv = &rsv->mv; | ||
158 | int status; | 381 | int status; |
159 | enum uwb_drp_reason reason_code; | 382 | enum uwb_drp_reason reason_code; |
160 | 383 | struct uwb_mas_bm mas; | |
384 | |||
161 | status = uwb_ie_drp_status(drp_ie); | 385 | status = uwb_ie_drp_status(drp_ie); |
162 | reason_code = uwb_ie_drp_reason_code(drp_ie); | 386 | reason_code = uwb_ie_drp_reason_code(drp_ie); |
387 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
163 | 388 | ||
164 | if (status) { | 389 | switch (reason_code) { |
165 | switch (reason_code) { | 390 | case UWB_DRP_REASON_ACCEPTED: |
166 | case UWB_DRP_REASON_ACCEPTED: | 391 | |
167 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | 392 | if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { |
168 | break; | 393 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); |
169 | case UWB_DRP_REASON_MODIFIED: | ||
170 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | ||
171 | reason_code, status); | ||
172 | break; | 394 | break; |
173 | default: | ||
174 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
175 | reason_code, status); | ||
176 | } | 395 | } |
177 | } else { | 396 | |
178 | switch (reason_code) { | 397 | if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { |
179 | case UWB_DRP_REASON_ACCEPTED: | 398 | /* drp_ie is companion */ |
180 | /* New reservations are handled in uwb_rsv_find(). */ | 399 | if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) |
181 | break; | 400 | /* stroke companion */ |
182 | case UWB_DRP_REASON_DENIED: | 401 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); |
183 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 402 | } else { |
184 | break; | 403 | if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { |
185 | case UWB_DRP_REASON_CONFLICT: | 404 | if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) { |
186 | case UWB_DRP_REASON_MODIFIED: | 405 | /* FIXME: there is a conflict, find |
187 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 406 | * the conflicting reservations and |
188 | reason_code, status); | 407 | * take a sensible action. Consider |
408 | * that in drp_ie there is the | ||
409 | * "neighbour" */ | ||
410 | uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); | ||
411 | } else { | ||
412 | /* accept the extra reservation */ | ||
413 | bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS); | ||
414 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); | ||
415 | } | ||
416 | } else { | ||
417 | if (status) { | ||
418 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | } | ||
423 | break; | ||
424 | |||
425 | case UWB_DRP_REASON_MODIFIED: | ||
426 | /* check to see if we have already modified the reservation */ | ||
427 | if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { | ||
428 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
189 | break; | 429 | break; |
190 | default: | ||
191 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
192 | reason_code, status); | ||
193 | } | 430 | } |
431 | |||
432 | /* find if the owner wants to expand or reduce */ | ||
433 | if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
434 | /* owner is reducing */ | ||
435 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS); | ||
436 | uwb_drp_avail_release(rsv->rc, &mv->companion_mas); | ||
437 | } | ||
438 | |||
439 | bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); | ||
440 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); | ||
441 | break; | ||
442 | default: | ||
443 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
444 | reason_code, status); | ||
194 | } | 445 | } |
195 | } | 446 | } |
196 | 447 | ||
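In the UWB_DRP_REASON_MODIFIED branch above, bitmap_subset() detects that the owner is shrinking the reservation and bitmap_andnot() computes the MASs to hand back to the local DRP availability. A tiny userspace illustration of the same arithmetic (sketch only; the driver operates on UWB_NUM_MAS-bit bitmaps via the kernel bitmap helpers, a single word is used here):

    #include <stdio.h>

    int main(void)
    {
            unsigned long old_mas = 0x00ffUL;  /* MASs currently held by the reservation */
            unsigned long new_mas = 0x000fUL;  /* MAS set announced in the new DRP IE    */

            int is_subset = (new_mas & ~old_mas) == 0;    /* bitmap_subset(new, old)  */
            unsigned long released = old_mas & ~new_mas;  /* bitmap_andnot(old, new)  */

            if (is_subset)
                    printf("owner is reducing, release MASs 0x%lx\n", released);
            return 0;
    }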
@@ -199,23 +450,60 @@ static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
199 | * state. | 450 | * state. |
200 | */ | 451 | */ |
201 | static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | 452 | static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, |
202 | struct uwb_ie_drp *drp_ie) | 453 | struct uwb_dev *src, struct uwb_ie_drp *drp_ie, |
454 | struct uwb_rc_evt_drp *drp_evt) | ||
203 | { | 455 | { |
204 | struct device *dev = &rc->uwb_dev.dev; | 456 | struct device *dev = &rc->uwb_dev.dev; |
457 | struct uwb_rsv_move *mv = &rsv->mv; | ||
205 | int status; | 458 | int status; |
206 | enum uwb_drp_reason reason_code; | 459 | enum uwb_drp_reason reason_code; |
460 | struct uwb_mas_bm mas; | ||
207 | 461 | ||
208 | status = uwb_ie_drp_status(drp_ie); | 462 | status = uwb_ie_drp_status(drp_ie); |
209 | reason_code = uwb_ie_drp_reason_code(drp_ie); | 463 | reason_code = uwb_ie_drp_reason_code(drp_ie); |
464 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
210 | 465 | ||
211 | if (status) { | 466 | if (status) { |
212 | switch (reason_code) { | 467 | switch (reason_code) { |
213 | case UWB_DRP_REASON_ACCEPTED: | 468 | case UWB_DRP_REASON_ACCEPTED: |
214 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 469 | switch (rsv->state) { |
215 | break; | 470 | case UWB_RSV_STATE_O_PENDING: |
216 | case UWB_DRP_REASON_MODIFIED: | 471 | case UWB_RSV_STATE_O_INITIATED: |
217 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 472 | case UWB_RSV_STATE_O_ESTABLISHED: |
218 | reason_code, status); | 473 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); |
474 | break; | ||
475 | case UWB_RSV_STATE_O_MODIFIED: | ||
476 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
477 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
478 | } else { | ||
479 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
480 | } | ||
481 | break; | ||
482 | |||
483 | case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */ | ||
484 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
485 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
486 | } else { | ||
487 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
488 | } | ||
489 | break; | ||
490 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
491 | if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) { | ||
492 | /* Companion reservation accepted */ | ||
493 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
494 | } else { | ||
495 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
496 | } | ||
497 | break; | ||
498 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
499 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) | ||
500 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
501 | else | ||
502 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
503 | break; | ||
504 | default: | ||
505 | break; | ||
506 | } | ||
219 | break; | 507 | break; |
220 | default: | 508 | default: |
221 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | 509 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", |
@@ -230,9 +518,10 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
230 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 518 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
231 | break; | 519 | break; |
232 | case UWB_DRP_REASON_CONFLICT: | 520 | case UWB_DRP_REASON_CONFLICT: |
233 | case UWB_DRP_REASON_MODIFIED: | 521 | /* resolve the conflict */ |
234 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 522 | bitmap_complement(mas.bm, src->last_availability_bm, |
235 | reason_code, status); | 523 | UWB_NUM_MAS); |
524 | uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); | ||
236 | break; | 525 | break; |
237 | default: | 526 | default: |
238 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | 527 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", |
@@ -241,12 +530,110 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
241 | } | 530 | } |
242 | } | 531 | } |
243 | 532 | ||
533 | static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) | ||
534 | { | ||
535 | unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; | ||
536 | mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); | ||
537 | } | ||
538 | |||
539 | static void uwb_cnflt_update_work(struct work_struct *work) | ||
540 | { | ||
541 | struct uwb_cnflt_alien *cnflt = container_of(work, | ||
542 | struct uwb_cnflt_alien, | ||
543 | cnflt_update_work); | ||
544 | struct uwb_cnflt_alien *c; | ||
545 | struct uwb_rc *rc = cnflt->rc; | ||
546 | |||
547 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
548 | |||
549 | mutex_lock(&rc->rsvs_mutex); | ||
550 | |||
551 | list_del(&cnflt->rc_node); | ||
552 | |||
553 | /* update rc global conflicting alien bitmap */ | ||
554 | bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); | ||
555 | |||
556 | list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { | ||
557 | bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS); | ||
558 | } | ||
559 | |||
560 | queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); | ||
561 | |||
562 | kfree(cnflt); | ||
563 | mutex_unlock(&rc->rsvs_mutex); | ||
564 | } | ||
565 | |||
566 | static void uwb_cnflt_timer(unsigned long arg) | ||
567 | { | ||
568 | struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg; | ||
569 | |||
570 | queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); | ||
571 | } | ||
572 | |||
244 | /* | 573 | /* |
245 | * Process a received DRP IE, it's either for a reservation owned by | 574 | * We have received a DRP IE of type Alien BP and we need to make |
246 | * the RC or targeted at it (or it's for a WUSB cluster reservation). | 575 | * sure we do not transmit in conflicting MASs. |
247 | */ | 576 | */ |
248 | static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | 577 | static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) |
249 | struct uwb_ie_drp *drp_ie) | 578 | { |
579 | struct device *dev = &rc->uwb_dev.dev; | ||
580 | struct uwb_mas_bm mas; | ||
581 | struct uwb_cnflt_alien *cnflt; | ||
582 | char buf[72]; | ||
583 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
584 | |||
585 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
586 | bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); | ||
587 | |||
588 | list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { | ||
589 | if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { | ||
590 | /* Existing alien BP reservation conflicting | ||
591 | * bitmap, just reset the timer */ | ||
592 | uwb_cnflt_alien_stroke_timer(cnflt); | ||
593 | return; | ||
594 | } | ||
595 | } | ||
596 | |||
597 | /* New alien BP reservation conflicting bitmap */ | ||
598 | |||
599 | /* alloc and initialize new uwb_cnflt_alien */ | ||
600 | cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); | ||
601 | if (!cnflt) { | ||
602 | dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); | ||
    | return; | ||
    | } | ||
603 | INIT_LIST_HEAD(&cnflt->rc_node); | ||
604 | init_timer(&cnflt->timer); | ||
605 | cnflt->timer.function = uwb_cnflt_timer; | ||
606 | cnflt->timer.data = (unsigned long)cnflt; | ||
607 | |||
608 | cnflt->rc = rc; | ||
609 | INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); | ||
610 | |||
611 | bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); | ||
612 | |||
613 | list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); | ||
614 | |||
615 | /* update rc global conflicting alien bitmap */ | ||
616 | bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); | ||
617 | |||
618 | queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); | ||
619 | |||
620 | /* start the timer */ | ||
621 | uwb_cnflt_alien_stroke_timer(cnflt); | ||
622 | } | ||
623 | |||
624 | static void uwb_drp_process_not_involved(struct uwb_rc *rc, | ||
625 | struct uwb_rc_evt_drp *drp_evt, | ||
626 | struct uwb_ie_drp *drp_ie) | ||
627 | { | ||
628 | struct uwb_mas_bm mas; | ||
629 | |||
630 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
631 | uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); | ||
632 | } | ||
633 | |||
634 | static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, | ||
635 | struct uwb_rc_evt_drp *drp_evt, | ||
636 | struct uwb_ie_drp *drp_ie) | ||
250 | { | 637 | { |
251 | struct uwb_rsv *rsv; | 638 | struct uwb_rsv *rsv; |
252 | 639 | ||
@@ -259,7 +646,7 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | |||
259 | */ | 646 | */ |
260 | return; | 647 | return; |
261 | } | 648 | } |
262 | 649 | ||
263 | /* | 650 | /* |
264 | * Do nothing with DRP IEs for reservations that have been | 651 | * Do nothing with DRP IEs for reservations that have been |
265 | * terminated. | 652 | * terminated. |
@@ -268,13 +655,43 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | |||
268 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 655 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
269 | return; | 656 | return; |
270 | } | 657 | } |
271 | 658 | ||
272 | if (uwb_ie_drp_owner(drp_ie)) | 659 | if (uwb_ie_drp_owner(drp_ie)) |
273 | uwb_drp_process_target(rc, rsv, drp_ie); | 660 | uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); |
661 | else | ||
662 | uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); | ||
663 | |||
664 | } | ||
665 | |||
666 | |||
667 | static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) | ||
668 | { | ||
669 | return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * Process a received DRP IE. | ||
674 | */ | ||
675 | static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
676 | struct uwb_dev *src, struct uwb_ie_drp *drp_ie) | ||
677 | { | ||
678 | if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) | ||
679 | uwb_drp_handle_alien_drp(rc, drp_ie); | ||
680 | else if (uwb_drp_involves_us(rc, drp_ie)) | ||
681 | uwb_drp_process_involved(rc, src, drp_evt, drp_ie); | ||
274 | else | 682 | else |
275 | uwb_drp_process_owner(rc, rsv, drp_ie); | 683 | uwb_drp_process_not_involved(rc, drp_evt, drp_ie); |
276 | } | 684 | } |
277 | 685 | ||
686 | /* | ||
687 | * Process a received DRP Availability IE | ||
688 | */ | ||
689 | static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, | ||
690 | struct uwb_ie_drp_avail *drp_availability_ie) | ||
691 | { | ||
692 | bitmap_copy(src->last_availability_bm, | ||
693 | drp_availability_ie->bmp, UWB_NUM_MAS); | ||
694 | } | ||
278 | 695 | ||
279 | /* | 696 | /* |
280 | * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) | 697 | * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) |
@@ -296,10 +713,10 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | |||
296 | 713 | ||
297 | switch (ie_hdr->element_id) { | 714 | switch (ie_hdr->element_id) { |
298 | case UWB_IE_DRP_AVAILABILITY: | 715 | case UWB_IE_DRP_AVAILABILITY: |
299 | /* FIXME: does something need to be done with this? */ | 716 | uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); |
300 | break; | 717 | break; |
301 | case UWB_IE_DRP: | 718 | case UWB_IE_DRP: |
302 | uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); | 719 | uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); |
303 | break; | 720 | break; |
304 | default: | 721 | default: |
305 | dev_warn(dev, "unexpected IE in DRP notification\n"); | 722 | dev_warn(dev, "unexpected IE in DRP notification\n"); |
@@ -312,55 +729,6 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | |||
312 | (int)ielen); | 729 | (int)ielen); |
313 | } | 730 | } |
314 | 731 | ||
315 | |||
316 | /* | ||
317 | * Go through all the DRP IEs and find the ones that conflict with our | ||
318 | * reservations. | ||
319 | * | ||
320 | * FIXME: must resolve the conflict according the the rules in | ||
321 | * [ECMA-368]. | ||
322 | */ | ||
323 | static | ||
324 | void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
325 | size_t ielen, struct uwb_dev *src_dev) | ||
326 | { | ||
327 | struct device *dev = &rc->uwb_dev.dev; | ||
328 | struct uwb_ie_hdr *ie_hdr; | ||
329 | struct uwb_ie_drp *drp_ie; | ||
330 | void *ptr; | ||
331 | |||
332 | ptr = drp_evt->ie_data; | ||
333 | for (;;) { | ||
334 | ie_hdr = uwb_ie_next(&ptr, &ielen); | ||
335 | if (!ie_hdr) | ||
336 | break; | ||
337 | |||
338 | drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); | ||
339 | |||
340 | /* FIXME: check if this DRP IE conflicts. */ | ||
341 | } | ||
342 | |||
343 | if (ielen > 0) | ||
344 | dev_warn(dev, "%d octets remaining in DRP notification\n", | ||
345 | (int)ielen); | ||
346 | } | ||
347 | |||
348 | |||
349 | /* | ||
350 | * Terminate all reservations owned by, or targeted at, 'uwb_dev'. | ||
351 | */ | ||
352 | static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) | ||
353 | { | ||
354 | struct uwb_rsv *rsv; | ||
355 | |||
356 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
357 | if (rsv->owner == uwb_dev | ||
358 | || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) | ||
359 | uwb_rsv_remove(rsv); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | |||
364 | /** | 732 | /** |
365 | * uwbd_evt_handle_rc_drp - handle a DRP_IE event | 733 | * uwbd_evt_handle_rc_drp - handle a DRP_IE event |
366 | * @evt: the DRP_IE event from the radio controller | 734 | * @evt: the DRP_IE event from the radio controller |
@@ -401,7 +769,6 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt) | |||
401 | size_t ielength, bytes_left; | 769 | size_t ielength, bytes_left; |
402 | struct uwb_dev_addr src_addr; | 770 | struct uwb_dev_addr src_addr; |
403 | struct uwb_dev *src_dev; | 771 | struct uwb_dev *src_dev; |
404 | int reason; | ||
405 | 772 | ||
406 | /* Is there enough data to decode the event (and any IEs in | 773 | /* Is there enough data to decode the event (and any IEs in |
407 | its payload)? */ | 774 | its payload)? */ |
@@ -437,22 +804,8 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt) | |||
437 | 804 | ||
438 | mutex_lock(&rc->rsvs_mutex); | 805 | mutex_lock(&rc->rsvs_mutex); |
439 | 806 | ||
440 | reason = uwb_rc_evt_drp_reason(drp_evt); | 807 | /* We do not distinguish based on the reason code */ |
441 | 808 | uwb_drp_process_all(rc, drp_evt, ielength, src_dev); | |
442 | switch (reason) { | ||
443 | case UWB_DRP_NOTIF_DRP_IE_RCVD: | ||
444 | uwb_drp_process_all(rc, drp_evt, ielength, src_dev); | ||
445 | break; | ||
446 | case UWB_DRP_NOTIF_CONFLICT: | ||
447 | uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); | ||
448 | break; | ||
449 | case UWB_DRP_NOTIF_TERMINATE: | ||
450 | uwb_drp_terminate_all(rc, src_dev); | ||
451 | break; | ||
452 | default: | ||
453 | dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); | ||
454 | break; | ||
455 | } | ||
456 | 809 | ||
457 | mutex_unlock(&rc->rsvs_mutex); | 810 | mutex_unlock(&rc->rsvs_mutex); |
458 | 811 | ||