/*
 * Ultra Wide Band
 * Dynamic Reservation Protocol handling
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "uwb-internal.h"

/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};

static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
				    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock_bh(&rc->rsvs_lock);
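	/*
	 * A count greater than one means another SET-DRP-IE update was
	 * requested while this command was still in flight, so queue a
	 * fresh update now that it has completed.
	 */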
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock_bh(&rc->rsvs_lock);
}

/**
 * Construct and send the SET DRP IE
 *
 * @rc: UWB Host controller
 * @returns: >= 0 number of bytes still available in the beacon
 *           < 0 errno code on error.
 *
 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
 * device to include in its beacon at the same time. We thus have to
 * traverse all reservations and include the DRP IEs of all PENDING
 * and NEGOTIATED reservations in a SET DRP command for transmission.
 *
 * A DRP Availability IE is appended.
 *
 * rc->rsvs_mutex is held
 *
 * FIXME: We currently ignore the returned value indicating the remaining
 * space in the beacon. This could be used to deny reservation requests
 * earlier if it is determined that they would cause the beacon space to
 * be exceeded.
 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
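	/* Each DRP IE occupies its payload (hdr.length octets) plus the
	 * 2-octet IE header (Element ID and Length), hence the "+ 2". */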
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: DRP avail IE is not always needed */
	/* put DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

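	/* The command completes asynchronously; the result is checked in
	 * uwb_rc_set_drp_cmd_done(). */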
	result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes,
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				  uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	kfree(cmd);
error:
	return result;
}

/*
 * Evaluate the action to perform using conflict resolution rules
 *
 * Return a uwb_drp_conflict_action.
 */
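/*
 * Roughly: PCA-vs-PCA conflicts and our own Alien BP reservations are
 * left alone, an external Alien BP always wins, an established
 * reservation (status ONE) beats a pending one (status ZERO), and the
 * remaining ties are decided by the Tie Breaker bit together with the
 * relative beacon slot order.
 */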
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status = uwb_ie_drp_status(ext_drp_ie);
	int ext_type = uwb_ie_drp_type(ext_drp_ie);

	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot < ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot > ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}

static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try move */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop some MASs with reason code "modified" */
			/* put the MASs to be dropped in the companion bitmap */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		default:
			break;
		}
	} else {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		default:
			break;
		}
	}
}

static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

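	/*
	 * companion_only means only the expansion (companion) MASs are in
	 * conflict, so the established part of the reservation can be
	 * kept; otherwise the base reservation itself must be modified.
	 */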
	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
			}
		}
	} else { /* the base part of the reservation is also conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some MASs with reason code "modified" */

			/* put the MASs to be dropped in the companion bitmap */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}

static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv_move *mv;

	/* check if the conflicting reservation has two DRP IEs */
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
			handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
						  rsv, false, conflicting_mas);
		} else {
			if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
				handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
							  rsv, true, conflicting_mas);
			}
		}
	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas);
	}
}

static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
					    struct uwb_rc_evt_drp *drp_evt,
					    struct uwb_ie_drp *drp_ie,
					    struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas);
	}
}

/*
 * Based on the DRP IE, transition a target reservation to a new
 * state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:

		if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			break;
		}

		if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
			/* drp_ie is the companion IE */
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS))
				/* stroke the companion reservation */
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		} else {
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
				if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) {
					/* FIXME: there is a conflict, find
					 * the conflicting reservations and
					 * take a sensible action. Consider
					 * that in drp_ie there is the
					 * "neighbour" */
					uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
				} else {
					/* accept the extra reservation */
					bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS);
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
				}
			} else {
				if (status) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
				}
			}
		}
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find out whether the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}

/*
 * Based on the DRP IE, transition an owner reservation to a new
 * state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

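	/*
	 * The Reservation Status bit is set once the target has accepted
	 * the reservation; while it is zero the Reason Code tells us
	 * whether the request is still pending, was denied or conflicts
	 * with another reservation.
	 */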
	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			switch (rsv->state) {
			case UWB_RSV_STATE_O_PENDING:
			case UWB_RSV_STATE_O_INITIATED:
			case UWB_RSV_STATE_O_ESTABLISHED:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				break;
			case UWB_RSV_STATE_O_MODIFIED:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
				}
				break;

			case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_EXPANDING:
				if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
					/* Companion reservation accepted */
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_COMBINING:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				else
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				break;
			default:
				break;
			}
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}

static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;

	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}

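/*
 * Runs when an alien BP entry's timer expires, i.e. its DRP IE has not
 * been seen for mMaxLostBeacons superframes: drop the entry and
 * recompute the radio controller's global conflicting-MAS bitmap.
 */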
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_cnflt_timer(unsigned long arg)
{
	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;

	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}

/*
 * We have received a DRP IE of type Alien BP and we need to make
 * sure we do not transmit in conflicting MASs.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	char buf[72];
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Existing alien BP reservation conflicting
			 * bitmap, just reset the timer */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien BP reservation conflicting bitmap */

	/* alloc and initialize new uwb_cnflt_alien */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}
	INIT_LIST_HEAD(&cnflt->rc_node);
	init_timer(&cnflt->timer);
	cnflt->timer.function = uwb_cnflt_timer;
	cnflt->timer.data = (unsigned long)cnflt;

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* start the timer */
	uwb_cnflt_alien_stroke_timer(cnflt);
}

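/*
 * A DRP IE that does not name us may still claim MASs that overlap our
 * own reservations, so check every local reservation for conflicts.
 */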
static void uwb_drp_process_not_involved(struct uwb_rc *rc,
					 struct uwb_rc_evt_drp *drp_evt,
					 struct uwb_ie_drp *drp_ie)
{
	struct uwb_mas_bm mas;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}

static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation? It's either for a recently
		 * terminated reservation, or the DRP IE couldn't be
		 * processed (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

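	/*
	 * The Owner bit tells us the sender's role: if the sender owns
	 * the reservation then we are the target, and vice versa.
	 */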
	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
}

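/*
 * The DevAddr in a DRP IE identifies the other party in the
 * reservation (the target if the sender is the owner, and vice versa),
 * so a match against our own DevAddr means the reservation involves us.
 */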
static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}

/*
 * Process a received DRP IE.
 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
		uwb_drp_handle_alien_drp(rc, drp_ie);
	else if (uwb_drp_involves_us(rc, drp_ie))
		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
	else
		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}

/*
 * Process a received DRP Availability IE.
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}

/*
 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
 * from a device.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}

/**
 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
 * @evt: the DRP_IE event from the radio controller
 *
 * This processes DRP notifications from the radio controller, either
 * initiating a new reservation or transitioning an existing
 * reservation into a different state.
 *
 * DRP notifications can occur for three different reasons:
 *
 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *   the target or source have been received.
 *
 *   These DRP IEs could be new or for an existing reservation.
 *
 *   If the DRP IE for an existing reservation ceases to be
 *   received for at least mMaxLostBeacons, the reservation should be
 *   considered to be terminated. Note that the TERMINATE reason (see
 *   below) may not always be signalled (e.g., the remote device has
 *   two or more reservations established with the RC).
 *
 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
 *   group conflict with the RC's reservations.
 *
 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
 *   from a device (i.e., it has terminated all its reservations).
 *
 * Only the software state of the reservations is changed; the setting
 * of the radio controller's DRP IEs is done after all the events in
 * an event buffer are processed. This saves waiting multiple times
 * for the SET_DRP_IE command to complete.
 */
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc *rc = evt->rc;
	struct uwb_rc_evt_drp *drp_evt;
	size_t ielength, bytes_left;
	struct uwb_dev_addr src_addr;
	struct uwb_dev *src_dev;

	/* Is there enough data to decode the event (and any IEs in
	   its payload)? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP event: Not enough data to decode event "
			"[%zu bytes left, %zu needed]\n",
			evt->notif.size, sizeof(*drp_evt));
		return 0;
	}
	bytes_left = evt->notif.size - sizeof(*drp_evt);
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
	ielength = le16_to_cpu(drp_evt->ie_length);
	if (bytes_left != ielength) {
		dev_err(dev, "DRP event: Not enough data in payload [%zu "
			"bytes left, %zu declared in the event]\n",
			bytes_left, ielength);
		return 0;
	}

	memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
	src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
	if (!src_dev) {
		/*
		 * A DRP notification from an unrecognized device.
		 *
		 * This is probably from a WUSB device that doesn't
		 * have an EUI-48 and therefore doesn't show up in the
		 * UWB device database. It's safe to simply ignore
		 * these.
		 */
		return 0;
	}

	mutex_lock(&rc->rsvs_mutex);

	/* We do not distinguish based on the notification reason;
	 * all received DRP IEs are processed the same way. */
	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);

	mutex_unlock(&rc->rsvs_mutex);

	uwb_dev_put(src_dev);
	return 0;
}