ath9k: Clear NoA schedule properly
deliverable/linux.git: drivers/net/wireless/ath/ath9k/channel.c
1 /*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include "ath9k.h"
18
19 /* Set/change channels. If the channel is really being changed, it's done
20 * by resetting the chip. To accomplish this we must first clean up any
21 * pending DMA, then restart the hardware.
22 */
23 static int ath_set_channel(struct ath_softc *sc)
24 {
25 struct ath_hw *ah = sc->sc_ah;
26 struct ath_common *common = ath9k_hw_common(ah);
27 struct ieee80211_hw *hw = sc->hw;
28 struct ath9k_channel *hchan;
29 struct cfg80211_chan_def *chandef = &sc->cur_chan->chandef;
30 struct ieee80211_channel *chan = chandef->chan;
31 int pos = chan->hw_value;
32 int old_pos = -1;
33 int r;
34
35 if (test_bit(ATH_OP_INVALID, &common->op_flags))
36 return -EIO;
37
38 if (ah->curchan)
39 old_pos = ah->curchan - &ah->channels[0];
40
41 ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
42 chan->center_freq, chandef->width);
43
44 /* update survey stats for the old channel before switching */
45 spin_lock_bh(&common->cc_lock);
46 ath_update_survey_stats(sc);
47 spin_unlock_bh(&common->cc_lock);
48
49 ath9k_cmn_get_channel(hw, ah, chandef);
50
51 /* If the operating channel changes, change the survey in-use flags
52 * along with it.
53 * Reset the survey data for the new channel, unless we're switching
54 * back to the operating channel from an off-channel operation.
55 */
56 if (!sc->cur_chan->offchannel && sc->cur_survey != &sc->survey[pos]) {
57 if (sc->cur_survey)
58 sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
59
60 sc->cur_survey = &sc->survey[pos];
61
62 memset(sc->cur_survey, 0, sizeof(struct survey_info));
63 sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
64 } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
65 memset(&sc->survey[pos], 0, sizeof(struct survey_info));
66 }
67
68 hchan = &sc->sc_ah->channels[pos];
69 r = ath_reset(sc, hchan);
70 if (r)
71 return r;
72
73 /* The most recent snapshot of channel->noisefloor for the old
74 * channel is only available after the hardware reset. Copy it to
75 * the survey stats now.
76 */
77 if (old_pos >= 0)
78 ath_update_survey_nf(sc, old_pos);
79
80 /* Enable radar pulse detection if on a DFS channel. Spectral
81 * scanning and radar detection cannot be used concurrently.
82 */
83 if (hw->conf.radar_enabled) {
84 u32 rxfilter;
85
86 rxfilter = ath9k_hw_getrxfilter(ah);
87 rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
88 ATH9K_RX_FILTER_PHYERR;
89 ath9k_hw_setrxfilter(ah, rxfilter);
90 ath_dbg(common, DFS, "DFS enabled at freq %d\n",
91 chan->center_freq);
92 } else {
93 /* perform spectral scan if requested. */
94 if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
95 sc->spectral_mode == SPECTRAL_CHANSCAN)
96 ath9k_spectral_scan_trigger(hw);
97 }
98
99 return 0;
100 }
101
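/* Initialize every channel context to a sane default: the first available
 * 2 GHz (or 5 GHz) channel with an HT20 chandef, maximum TX power and empty
 * vif/acq lists. The real operating channel is programmed later through
 * ath_chanctx_set_channel().
 */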
102 void ath_chanctx_init(struct ath_softc *sc)
103 {
104 struct ath_chanctx *ctx;
105 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
106 struct ieee80211_supported_band *sband;
107 struct ieee80211_channel *chan;
108 int i, j;
109
110 sband = &common->sbands[IEEE80211_BAND_2GHZ];
111 if (!sband->n_channels)
112 sband = &common->sbands[IEEE80211_BAND_5GHZ];
113
114 chan = &sband->channels[0];
115 for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
116 ctx = &sc->chanctx[i];
117 cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
118 INIT_LIST_HEAD(&ctx->vifs);
119 ctx->txpower = ATH_TXPOWER_MAX;
120 for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
121 INIT_LIST_HEAD(&ctx->acq[j]);
122 }
123 }
124
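/* Update the chandef of the given context and, if it is the currently
 * active context, program the new channel into the hardware immediately.
 */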
125 void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx,
126 struct cfg80211_chan_def *chandef)
127 {
128 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
129 bool cur_chan;
130
131 spin_lock_bh(&sc->chan_lock);
132 if (chandef)
133 memcpy(&ctx->chandef, chandef, sizeof(*chandef));
134 cur_chan = sc->cur_chan == ctx;
135 spin_unlock_bh(&sc->chan_lock);
136
137 if (!cur_chan) {
138 ath_dbg(common, CHAN_CTX,
139 "Current context differs from the new context\n");
140 return;
141 }
142
143 ath_set_channel(sc);
144 }
145
146 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
147
148 /**********************************************************/
149 /* Functions to handle the channel context state machine. */
150 /**********************************************************/
151
152 static const char *offchannel_state_string(enum ath_offchannel_state state)
153 {
154 switch (state) {
155 case_rtn_string(ATH_OFFCHANNEL_IDLE);
156 case_rtn_string(ATH_OFFCHANNEL_PROBE_SEND);
157 case_rtn_string(ATH_OFFCHANNEL_PROBE_WAIT);
158 case_rtn_string(ATH_OFFCHANNEL_SUSPEND);
159 case_rtn_string(ATH_OFFCHANNEL_ROC_START);
160 case_rtn_string(ATH_OFFCHANNEL_ROC_WAIT);
161 case_rtn_string(ATH_OFFCHANNEL_ROC_DONE);
162 default:
163 return "unknown";
164 }
165 }
166
167 static const char *chanctx_event_string(enum ath_chanctx_event ev)
168 {
169 switch (ev) {
170 case_rtn_string(ATH_CHANCTX_EVENT_BEACON_PREPARE);
171 case_rtn_string(ATH_CHANCTX_EVENT_BEACON_SENT);
172 case_rtn_string(ATH_CHANCTX_EVENT_TSF_TIMER);
173 case_rtn_string(ATH_CHANCTX_EVENT_BEACON_RECEIVED);
174 case_rtn_string(ATH_CHANCTX_EVENT_AUTHORIZED);
175 case_rtn_string(ATH_CHANCTX_EVENT_SWITCH);
176 case_rtn_string(ATH_CHANCTX_EVENT_ASSIGN);
177 case_rtn_string(ATH_CHANCTX_EVENT_UNASSIGN);
178 case_rtn_string(ATH_CHANCTX_EVENT_CHANGE);
179 case_rtn_string(ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL);
180 default:
181 return "unknown";
182 }
183 }
184
185 static const char *chanctx_state_string(enum ath_chanctx_state state)
186 {
187 switch (state) {
188 case_rtn_string(ATH_CHANCTX_STATE_IDLE);
189 case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_BEACON);
190 case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_TIMER);
191 case_rtn_string(ATH_CHANCTX_STATE_SWITCH);
192 case_rtn_string(ATH_CHANCTX_STATE_FORCE_ACTIVE);
193 default:
194 return "unknown";
195 }
196 }
197
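/* Mark a context as active if any of its vifs is usable (an associated
 * station/P2P client, or any other interface type) and set or clear
 * ATH_OP_MULTI_CHANNEL depending on how many assigned, non-empty contexts
 * exist.
 */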
198 void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
199 {
200 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
201 struct ath_vif *avp;
202 bool active = false;
203 u8 n_active = 0;
204
205 if (!ctx)
206 return;
207
208 list_for_each_entry(avp, &ctx->vifs, list) {
209 struct ieee80211_vif *vif = avp->vif;
210
211 switch (vif->type) {
212 case NL80211_IFTYPE_P2P_CLIENT:
213 case NL80211_IFTYPE_STATION:
214 if (avp->assoc)
215 active = true;
216 break;
217 default:
218 active = true;
219 break;
220 }
221 }
222 ctx->active = active;
223
224 ath_for_each_chanctx(sc, ctx) {
225 if (!ctx->assigned || list_empty(&ctx->vifs))
226 continue;
227 n_active++;
228 }
229
230 if (n_active <= 1) {
231 clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags);
232 return;
233 }
234 if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
235 return;
236
237 if (ath9k_is_chanctx_enabled()) {
238 ath_chanctx_event(sc, NULL,
239 ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL);
240 }
241 }
242
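/* Return the "other" channel context; the !idx trick assumes that
 * sc->chanctx has exactly two slots.
 */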
243 static struct ath_chanctx *
244 ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
245 {
246 int idx = ctx - &sc->chanctx[0];
247
248 return &sc->chanctx[!idx];
249 }
250
251 static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
252 {
253 struct ath_chanctx *prev, *cur;
254 struct timespec ts;
255 u32 cur_tsf, prev_tsf, beacon_int;
256 s32 offset;
257
258 beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
259
260 cur = sc->cur_chan;
261 prev = ath_chanctx_get_next(sc, cur);
262
263 if (!prev->switch_after_beacon)
264 return;
265
266 getrawmonotonic(&ts);
267 cur_tsf = (u32) cur->tsf_val +
268 ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
269
270 prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf;
271 prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts);
272
273 /* Adjust the TSF time of the AP chanctx to keep its beacons
274 * at half beacon interval offset relative to the STA chanctx.
275 */
276 offset = cur_tsf - prev_tsf;
277
278 /* Ignore stale data or spurious timestamps */
279 if (offset < 0 || offset > 3 * beacon_int)
280 return;
281
282 offset = beacon_int / 2 - (offset % beacon_int);
283 prev->tsf_val += offset;
284 }
285
286 /* Configure the TSF-based hardware timer for a channel switch.
287 * Also set up a backup software timer in case the gen timer fails,
288 * which could be caused by a hardware reset.
289 */
290 static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time)
291 {
292 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
293 struct ath_hw *ah = sc->sc_ah;
294
295 ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, tsf_time, 1000000);
296 tsf_time -= ath9k_hw_gettsf32(ah);
297 tsf_time = msecs_to_jiffies(tsf_time / 1000) + 1;
298 mod_timer(&sc->sched.timer, jiffies + tsf_time);
299
300 ath_dbg(common, CHAN_CTX,
301 "Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time));
302 }
303
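/* Core of the channel context state machine. It is fed with events
 * (beacon prepare/sent/received, TSF timer, switch requests, etc.) and
 * decides when to schedule the next channel switch, when to arm the switch
 * timer and how to update the NoA schedule advertised in beacons.
 */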
304 void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
305 enum ath_chanctx_event ev)
306 {
307 struct ath_hw *ah = sc->sc_ah;
308 struct ath_common *common = ath9k_hw_common(ah);
309 struct ath_beacon_config *cur_conf;
310 struct ath_vif *avp = NULL;
311 struct ath_chanctx *ctx;
312 u32 tsf_time;
313 u32 beacon_int;
314
315 if (vif)
316 avp = (struct ath_vif *) vif->drv_priv;
317
318 spin_lock_bh(&sc->chan_lock);
319
320 ath_dbg(common, CHAN_CTX, "cur_chan: %d MHz, event: %s, state: %s\n",
321 sc->cur_chan->chandef.center_freq1,
322 chanctx_event_string(ev),
323 chanctx_state_string(sc->sched.state));
324
325 switch (ev) {
326 case ATH_CHANCTX_EVENT_BEACON_PREPARE:
327 if (avp->offchannel_duration)
328 avp->offchannel_duration = 0;
329
330 if (avp->chanctx != sc->cur_chan) {
331 ath_dbg(common, CHAN_CTX,
332 "Contexts differ, not preparing beacon\n");
333 break;
334 }
335
336 if (sc->sched.offchannel_pending && !sc->sched.wait_switch) {
337 sc->sched.offchannel_pending = false;
338 sc->next_chan = &sc->offchannel.chan;
339 sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
340 ath_dbg(common, CHAN_CTX,
341 "Setting offchannel_pending to false\n");
342 }
343
344 ctx = ath_chanctx_get_next(sc, sc->cur_chan);
345 if (ctx->active && sc->sched.state == ATH_CHANCTX_STATE_IDLE) {
346 sc->next_chan = ctx;
347 sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
348 ath_dbg(common, CHAN_CTX,
349 "Set next context, move chanctx state to WAIT_FOR_BEACON\n");
350 }
351
352 /* if the timer missed its window, use the next interval */
353 if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) {
354 sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
355 ath_dbg(common, CHAN_CTX,
356 "Move chanctx state from WAIT_FOR_TIMER to WAIT_FOR_BEACON\n");
357 }
358
359 /*
360 * When a context becomes inactive, for example when
361 * a station context is disassociated, the NoA
362 * attribute needs to be removed from subsequent
363 * beacons.
364 */
365 if (!ctx->active && avp->noa_duration &&
366 sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) {
367 avp->noa_duration = 0;
368 avp->periodic_noa = false;
369
370 ath_dbg(common, CHAN_CTX,
371 "Clearing NoA schedule\n");
372 }
373
374 if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
375 break;
376
377 ath_dbg(common, CHAN_CTX, "Preparing beacon for vif: %pM\n", vif->addr);
378
379 sc->sched.beacon_pending = true;
380 sc->sched.next_tbtt = REG_READ(ah, AR_NEXT_TBTT_TIMER);
381
382 cur_conf = &sc->cur_chan->beacon;
383 beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
384
385 /* defer channel switch by a quarter beacon interval */
386 tsf_time = sc->sched.next_tbtt + beacon_int / 4;
387 sc->sched.switch_start_time = tsf_time;
388 sc->cur_chan->last_beacon = sc->sched.next_tbtt;
389
390 /*
391 * If an offchannel switch is scheduled to happen after
392 * a beacon transmission, update the NoA with one-shot
393 * values and increment the index.
394 */
395 if (sc->next_chan == &sc->offchannel.chan) {
396 avp->noa_index++;
397 avp->offchannel_start = tsf_time;
398 avp->offchannel_duration = sc->sched.offchannel_duration;
399
400 ath_dbg(common, CHAN_CTX,
401 "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n",
402 avp->offchannel_duration,
403 avp->offchannel_start,
404 avp->noa_index);
405
406 /*
407 * When multiple contexts are active, the NoA
408 * has to be recalculated and advertised after
409 * an offchannel operation.
410 */
411 if (ctx->active && avp->noa_duration)
412 avp->noa_duration = 0;
413
414 break;
415 }
416
417 /*
418 * Clear the extend_absence flag if it had been
419 * set during the previous beacon transmission,
420 * since we need to revert to the normal NoA
421 * schedule.
422 */
423 if (ctx->active && sc->sched.extend_absence) {
424 avp->noa_duration = 0;
425 sc->sched.extend_absence = false;
426 }
427
428 /* If at least two consecutive beacons were missed on the STA
429 * chanctx, stay on the STA channel for one extra beacon period,
430 * to resync the timer properly.
431 */
432 if (ctx->active && sc->sched.beacon_miss >= 2) {
433 avp->noa_duration = 0;
434 sc->sched.extend_absence = true;
435 }
436
437 /* Prevent wrap-around issues */
438 if (avp->noa_duration && tsf_time - avp->noa_start > BIT(30))
439 avp->noa_duration = 0;
440
441 /*
442 * If multiple contexts are active, start periodic
443 * NoA and increment the index for the first
444 * announcement.
445 */
446 if (ctx->active &&
447 (!avp->noa_duration || sc->sched.force_noa_update)) {
448 avp->noa_index++;
449 avp->noa_start = tsf_time;
450
451 if (sc->sched.extend_absence)
452 avp->noa_duration = (3 * beacon_int / 2) +
453 sc->sched.channel_switch_time;
454 else
455 avp->noa_duration =
456 TU_TO_USEC(cur_conf->beacon_interval) / 2 +
457 sc->sched.channel_switch_time;
458
459 if (test_bit(ATH_OP_SCANNING, &common->op_flags) ||
460 sc->sched.extend_absence)
461 avp->periodic_noa = false;
462 else
463 avp->periodic_noa = true;
464
465 ath_dbg(common, CHAN_CTX,
466 "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
467 avp->noa_duration,
468 avp->noa_start,
469 avp->noa_index,
470 avp->periodic_noa);
471 }
472
473 if (ctx->active && sc->sched.force_noa_update)
474 sc->sched.force_noa_update = false;
475
476 break;
477 case ATH_CHANCTX_EVENT_BEACON_SENT:
478 if (!sc->sched.beacon_pending) {
479 ath_dbg(common, CHAN_CTX,
480 "No pending beacon\n");
481 break;
482 }
483
484 sc->sched.beacon_pending = false;
485 if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
486 break;
487
488 ath_dbg(common, CHAN_CTX,
489 "Move chanctx state to WAIT_FOR_TIMER\n");
490
491 sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
492 ath_chanctx_setup_timer(sc, sc->sched.switch_start_time);
493 break;
494 case ATH_CHANCTX_EVENT_TSF_TIMER:
495 if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_TIMER)
496 break;
497
498 if (!sc->cur_chan->switch_after_beacon &&
499 sc->sched.beacon_pending)
500 sc->sched.beacon_miss++;
501
502 ath_dbg(common, CHAN_CTX,
503 "Move chanctx state to SWITCH\n");
504
505 sc->sched.state = ATH_CHANCTX_STATE_SWITCH;
506 ieee80211_queue_work(sc->hw, &sc->chanctx_work);
507 break;
508 case ATH_CHANCTX_EVENT_BEACON_RECEIVED:
509 if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
510 sc->cur_chan == &sc->offchannel.chan)
511 break;
512
513 sc->sched.beacon_pending = false;
514 sc->sched.beacon_miss = 0;
515
516 if (sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
517 !sc->cur_chan->tsf_val)
518 break;
519
520 ath_chanctx_adjust_tbtt_delta(sc);
521
522 /* The TSF time might have been updated by the incoming beacon,
523 * so we need to update the channel switch timer to reflect the change.
524 */
525 tsf_time = sc->sched.switch_start_time;
526 tsf_time -= (u32) sc->cur_chan->tsf_val +
527 ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
528 tsf_time += ath9k_hw_gettsf32(ah);
529
530
531 ath_chanctx_setup_timer(sc, tsf_time);
532 break;
533 case ATH_CHANCTX_EVENT_AUTHORIZED:
534 if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE ||
535 avp->chanctx != sc->cur_chan)
536 break;
537
538 ath_dbg(common, CHAN_CTX,
539 "Move chanctx state from FORCE_ACTIVE to IDLE\n");
540
541 sc->sched.state = ATH_CHANCTX_STATE_IDLE;
542 /* fall through */
543 case ATH_CHANCTX_EVENT_SWITCH:
544 if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
545 sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
546 sc->cur_chan->switch_after_beacon ||
547 sc->cur_chan == &sc->offchannel.chan)
548 break;
549
550 /* If this is a station chanctx, stay active for half a
551 * beacon period (minus the channel switch time).
552 */
553 sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan);
554 cur_conf = &sc->cur_chan->beacon;
555
556 ath_dbg(common, CHAN_CTX,
557 "Move chanctx state to WAIT_FOR_TIMER (event SWITCH)\n");
558
559 sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
560 sc->sched.wait_switch = false;
561
562 tsf_time = TU_TO_USEC(cur_conf->beacon_interval) / 2;
563
564 if (sc->sched.extend_absence) {
565 sc->sched.beacon_miss = 0;
566 tsf_time *= 3;
567 }
568
569 tsf_time -= sc->sched.channel_switch_time;
570 tsf_time += ath9k_hw_gettsf32(sc->sc_ah);
571 sc->sched.switch_start_time = tsf_time;
572
573 ath_chanctx_setup_timer(sc, tsf_time);
574 sc->sched.beacon_pending = true;
575 break;
576 case ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL:
577 if (sc->cur_chan == &sc->offchannel.chan ||
578 sc->cur_chan->switch_after_beacon)
579 break;
580
581 sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan);
582 ieee80211_queue_work(sc->hw, &sc->chanctx_work);
583 break;
584 case ATH_CHANCTX_EVENT_UNASSIGN:
585 if (sc->cur_chan->assigned) {
586 if (sc->next_chan && !sc->next_chan->assigned &&
587 sc->next_chan != &sc->offchannel.chan)
588 sc->sched.state = ATH_CHANCTX_STATE_IDLE;
589 break;
590 }
591
592 ctx = ath_chanctx_get_next(sc, sc->cur_chan);
593 sc->sched.state = ATH_CHANCTX_STATE_IDLE;
594 if (!ctx->assigned)
595 break;
596
597 sc->next_chan = ctx;
598 ieee80211_queue_work(sc->hw, &sc->chanctx_work);
599 break;
600 case ATH_CHANCTX_EVENT_ASSIGN:
601 /*
602 * When adding a new channel context, check if a scan
603 * is in progress and abort it since the addition of
604 * a new channel context is usually followed by VIF
605 * assignment, in which case we have to start multi-channel
606 * operation.
607 */
608 if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
609 ath_dbg(common, CHAN_CTX,
610 "Aborting HW scan to add new context\n");
611
612 spin_unlock_bh(&sc->chan_lock);
613 del_timer_sync(&sc->offchannel.timer);
614 ath_scan_complete(sc, true);
615 spin_lock_bh(&sc->chan_lock);
616 }
617 break;
618 case ATH_CHANCTX_EVENT_CHANGE:
619 break;
620 }
621
622 spin_unlock_bh(&sc->chan_lock);
623 }
624
625 void ath_chanctx_beacon_sent_ev(struct ath_softc *sc,
626 enum ath_chanctx_event ev)
627 {
628 if (sc->sched.beacon_pending)
629 ath_chanctx_event(sc, NULL, ev);
630 }
631
632 void ath_chanctx_beacon_recv_ev(struct ath_softc *sc,
633 enum ath_chanctx_event ev)
634 {
635 ath_chanctx_event(sc, NULL, ev);
636 }
637
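/* Dwell time per scanned channel: a longer (~110 ms) dwell for passive
 * channels or scans without SSIDs, a shorter (~60 ms) dwell when probe
 * requests can be sent.
 */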
638 static int ath_scan_channel_duration(struct ath_softc *sc,
639 struct ieee80211_channel *chan)
640 {
641 struct cfg80211_scan_request *req = sc->offchannel.scan_req;
642
643 if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR))
644 return (HZ / 9); /* ~110 ms */
645
646 return (HZ / 16); /* ~60 ms */
647 }
648
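/* Request a switch to the given context. In multi-channel operation a switch
 * to the offchannel context is only marked as pending, so that it is carried
 * out after the next beacon; otherwise next_chan is set and the chanctx work
 * is queued right away.
 */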
649 static void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx,
650 struct cfg80211_chan_def *chandef)
651 {
652 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
653
654 spin_lock_bh(&sc->chan_lock);
655
656 if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) &&
657 (sc->cur_chan != ctx) && (ctx == &sc->offchannel.chan)) {
658 if (chandef)
659 ctx->chandef = *chandef;
660
661 sc->sched.offchannel_pending = true;
662 sc->sched.wait_switch = true;
663 sc->sched.offchannel_duration =
664 jiffies_to_usecs(sc->offchannel.duration) +
665 sc->sched.channel_switch_time;
666
667 spin_unlock_bh(&sc->chan_lock);
668 ath_dbg(common, CHAN_CTX,
669 "Set offchannel_pending to true\n");
670 return;
671 }
672
673 sc->next_chan = ctx;
674 if (chandef) {
675 ctx->chandef = *chandef;
676 ath_dbg(common, CHAN_CTX,
677 "Assigned next_chan to %d MHz\n", chandef->center_freq1);
678 }
679
680 if (sc->next_chan == &sc->offchannel.chan) {
681 sc->sched.offchannel_duration =
682 jiffies_to_usecs(sc->offchannel.duration) +
683 sc->sched.channel_switch_time;
684
685 if (chandef) {
686 ath_dbg(common, CHAN_CTX,
687 "Offchannel duration for chan %d MHz : %u\n",
688 chandef->center_freq1,
689 sc->sched.offchannel_duration);
690 }
691 }
692 spin_unlock_bh(&sc->chan_lock);
693 ieee80211_queue_work(sc->hw, &sc->chanctx_work);
694 }
695
696 static void ath_chanctx_offchan_switch(struct ath_softc *sc,
697 struct ieee80211_channel *chan)
698 {
699 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
700 struct cfg80211_chan_def chandef;
701
702 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
703 ath_dbg(common, CHAN_CTX,
704 "Channel definition created: %d MHz\n", chandef.center_freq1);
705
706 ath_chanctx_switch(sc, &sc->offchannel.chan, &chandef);
707 }
708
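/* Return an operating (assigned, non-empty) context, preferring one that
 * switches after its own beacon; fall back to the first context slot
 * otherwise. If 'active' is set, only active contexts are considered.
 */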
709 static struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc,
710 bool active)
711 {
712 struct ath_chanctx *ctx;
713
714 ath_for_each_chanctx(sc, ctx) {
715 if (!ctx->assigned || list_empty(&ctx->vifs))
716 continue;
717 if (active && !ctx->active)
718 continue;
719
720 if (ctx->switch_after_beacon)
721 return ctx;
722 }
723
724 return &sc->chanctx[0];
725 }
726
727 static void
728 ath_scan_next_channel(struct ath_softc *sc)
729 {
730 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
731 struct cfg80211_scan_request *req = sc->offchannel.scan_req;
732 struct ieee80211_channel *chan;
733
734 if (sc->offchannel.scan_idx >= req->n_channels) {
735 ath_dbg(common, CHAN_CTX,
736 "Moving offchannel state to ATH_OFFCHANNEL_IDLE, "
737 "scan_idx: %d, n_channels: %d\n",
738 sc->offchannel.scan_idx,
739 req->n_channels);
740
741 sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
742 ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
743 NULL);
744 return;
745 }
746
747 ath_dbg(common, CHAN_CTX,
748 "Moving offchannel state to ATH_OFFCHANNEL_PROBE_SEND, scan_idx: %d\n",
749 sc->offchannel.scan_idx);
750
751 chan = req->channels[sc->offchannel.scan_idx++];
752 sc->offchannel.duration = ath_scan_channel_duration(sc, chan);
753 sc->offchannel.state = ATH_OFFCHANNEL_PROBE_SEND;
754
755 ath_chanctx_offchan_switch(sc, chan);
756 }
757
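/* Advance the offchannel state machine: continue a pending HW scan, start a
 * requested remain-on-channel period, or return to the operating channel if
 * there is nothing left to do offchannel.
 */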
758 void ath_offchannel_next(struct ath_softc *sc)
759 {
760 struct ieee80211_vif *vif;
761
762 if (sc->offchannel.scan_req) {
763 vif = sc->offchannel.scan_vif;
764 sc->offchannel.chan.txpower = vif->bss_conf.txpower;
765 ath_scan_next_channel(sc);
766 } else if (sc->offchannel.roc_vif) {
767 vif = sc->offchannel.roc_vif;
768 sc->offchannel.chan.txpower = vif->bss_conf.txpower;
769 sc->offchannel.duration =
770 msecs_to_jiffies(sc->offchannel.roc_duration);
771 sc->offchannel.state = ATH_OFFCHANNEL_ROC_START;
772 ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan);
773 } else {
774 ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
775 NULL);
776 sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
777 if (sc->ps_idle)
778 ath_cancel_work(sc);
779 }
780 }
781
782 void ath_roc_complete(struct ath_softc *sc, bool abort)
783 {
784 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
785
786 if (abort)
787 ath_dbg(common, CHAN_CTX, "RoC aborted\n");
788 else
789 ath_dbg(common, CHAN_CTX, "RoC expired\n");
790
791 sc->offchannel.roc_vif = NULL;
792 sc->offchannel.roc_chan = NULL;
793 if (!abort)
794 ieee80211_remain_on_channel_expired(sc->hw);
795 ath_offchannel_next(sc);
796 ath9k_ps_restore(sc);
797 }
798
799 void ath_scan_complete(struct ath_softc *sc, bool abort)
800 {
801 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
802
803 if (abort)
804 ath_dbg(common, CHAN_CTX, "HW scan aborted\n");
805 else
806 ath_dbg(common, CHAN_CTX, "HW scan complete\n");
807
808 sc->offchannel.scan_req = NULL;
809 sc->offchannel.scan_vif = NULL;
810 sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
811 ieee80211_scan_completed(sc->hw, abort);
812 clear_bit(ATH_OP_SCANNING, &common->op_flags);
813 spin_lock_bh(&sc->chan_lock);
814 if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
815 sc->sched.force_noa_update = true;
816 spin_unlock_bh(&sc->chan_lock);
817 ath_offchannel_next(sc);
818 ath9k_ps_restore(sc);
819 }
820
821 static void ath_scan_send_probe(struct ath_softc *sc,
822 struct cfg80211_ssid *ssid)
823 {
824 struct cfg80211_scan_request *req = sc->offchannel.scan_req;
825 struct ieee80211_vif *vif = sc->offchannel.scan_vif;
826 struct ath_tx_control txctl = {};
827 struct sk_buff *skb;
828 struct ieee80211_tx_info *info;
829 int band = sc->offchannel.chan.chandef.chan->band;
830
831 skb = ieee80211_probereq_get(sc->hw, vif,
832 ssid->ssid, ssid->ssid_len, req->ie_len);
833 if (!skb)
834 return;
835
836 info = IEEE80211_SKB_CB(skb);
837 if (req->no_cck)
838 info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
839
840 if (req->ie_len)
841 memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
842
843 skb_set_queue_mapping(skb, IEEE80211_AC_VO);
844
845 if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL))
846 goto error;
847
848 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
849 txctl.force_channel = true;
850 if (ath_tx_start(sc->hw, skb, &txctl))
851 goto error;
852
853 return;
854
855 error:
856 ieee80211_free_txskb(sc->hw, skb);
857 }
858
859 static void ath_scan_channel_start(struct ath_softc *sc)
860 {
861 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
862 struct cfg80211_scan_request *req = sc->offchannel.scan_req;
863 int i;
864
865 if (!(sc->cur_chan->chandef.chan->flags & IEEE80211_CHAN_NO_IR) &&
866 req->n_ssids) {
867 for (i = 0; i < req->n_ssids; i++)
868 ath_scan_send_probe(sc, &req->ssids[i]);
869
870 }
871
872 ath_dbg(common, CHAN_CTX,
873 "Moving offchannel state to ATH_OFFCHANNEL_PROBE_WAIT\n");
874
875 sc->offchannel.state = ATH_OFFCHANNEL_PROBE_WAIT;
876 mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration);
877 }
878
879 static void ath_chanctx_timer(unsigned long data)
880 {
881 struct ath_softc *sc = (struct ath_softc *) data;
882 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
883
884 ath_dbg(common, CHAN_CTX,
885 "Channel context timer invoked\n");
886
887 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER);
888 }
889
890 static void ath_offchannel_timer(unsigned long data)
891 {
892 struct ath_softc *sc = (struct ath_softc *)data;
893 struct ath_chanctx *ctx;
894 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
895
896 ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n",
897 __func__, offchannel_state_string(sc->offchannel.state));
898
899 switch (sc->offchannel.state) {
900 case ATH_OFFCHANNEL_PROBE_WAIT:
901 if (!sc->offchannel.scan_req)
902 return;
903
904 /* get first active channel context */
905 ctx = ath_chanctx_get_oper_chan(sc, true);
906 if (ctx->active) {
907 ath_dbg(common, CHAN_CTX,
908 "Switch to oper/active context, "
909 "move offchannel state to ATH_OFFCHANNEL_SUSPEND\n");
910
911 sc->offchannel.state = ATH_OFFCHANNEL_SUSPEND;
912 ath_chanctx_switch(sc, ctx, NULL);
913 mod_timer(&sc->offchannel.timer, jiffies + HZ / 10);
914 break;
915 }
916 /* fall through */
917 case ATH_OFFCHANNEL_SUSPEND:
918 if (!sc->offchannel.scan_req)
919 return;
920
921 ath_scan_next_channel(sc);
922 break;
923 case ATH_OFFCHANNEL_ROC_START:
924 case ATH_OFFCHANNEL_ROC_WAIT:
925 ctx = ath_chanctx_get_oper_chan(sc, false);
926 sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
927 ath_chanctx_switch(sc, ctx, NULL);
928 break;
929 default:
930 break;
931 }
932 }
933
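/* For an associated station vif, send a nullfunc frame to the AP with the
 * PM bit set (or cleared) to announce that we are leaving (or returning to)
 * the channel. Returns true if a frame was queued for transmission.
 */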
934 static bool
935 ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
936 bool powersave)
937 {
938 struct ieee80211_vif *vif = avp->vif;
939 struct ieee80211_sta *sta = NULL;
940 struct ieee80211_hdr_3addr *nullfunc;
941 struct ath_tx_control txctl;
942 struct sk_buff *skb;
943 int band = sc->cur_chan->chandef.chan->band;
944
945 switch (vif->type) {
946 case NL80211_IFTYPE_STATION:
947 if (!avp->assoc)
948 return false;
949
950 skb = ieee80211_nullfunc_get(sc->hw, vif);
951 if (!skb)
952 return false;
953
954 nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
955 if (powersave)
956 nullfunc->frame_control |=
957 cpu_to_le16(IEEE80211_FCTL_PM);
958
959 skb_set_queue_mapping(skb, IEEE80211_AC_VO);
960 if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) {
961 dev_kfree_skb_any(skb);
962 return false;
963 }
964 break;
965 default:
966 return false;
967 }
968
969 memset(&txctl, 0, sizeof(txctl));
970 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
971 txctl.sta = sta;
972 txctl.force_channel = true;
973 if (ath_tx_start(sc->hw, skb, &txctl)) {
974 ieee80211_free_txskb(sc->hw, skb);
975 return false;
976 }
977
978 return true;
979 }
980
981 static bool
982 ath_chanctx_send_ps_frame(struct ath_softc *sc, bool powersave)
983 {
984 struct ath_vif *avp;
985 bool sent = false;
986
987 rcu_read_lock();
988 list_for_each_entry(avp, &sc->cur_chan->vifs, list) {
989 if (ath_chanctx_send_vif_ps_frame(sc, avp, powersave))
990 sent = true;
991 }
992 rcu_read_unlock();
993
994 return sent;
995 }
996
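/* Decide whether a pending channel switch has to be deferred. Switching away
 * from the offchannel context, switching while already in the SWITCH state,
 * or switching from an idle context that does not switch after its own
 * beacon happens immediately; in all other cases the switch waits for the
 * next beacon.
 */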
997 static bool ath_chanctx_defer_switch(struct ath_softc *sc)
998 {
999 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1000
1001 if (sc->cur_chan == &sc->offchannel.chan)
1002 return false;
1003
1004 switch (sc->sched.state) {
1005 case ATH_CHANCTX_STATE_SWITCH:
1006 return false;
1007 case ATH_CHANCTX_STATE_IDLE:
1008 if (!sc->cur_chan->switch_after_beacon)
1009 return false;
1010
1011 ath_dbg(common, CHAN_CTX,
1012 "Defer switch, set chanctx state to WAIT_FOR_BEACON\n");
1013
1014 sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
1015 break;
1016 default:
1017 break;
1018 }
1019
1020 return true;
1021 }
1022
1023 static void ath_offchannel_channel_change(struct ath_softc *sc)
1024 {
1025 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1026
1027 ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n",
1028 __func__, offchannel_state_string(sc->offchannel.state));
1029
1030 switch (sc->offchannel.state) {
1031 case ATH_OFFCHANNEL_PROBE_SEND:
1032 if (!sc->offchannel.scan_req)
1033 return;
1034
1035 if (sc->cur_chan->chandef.chan !=
1036 sc->offchannel.chan.chandef.chan)
1037 return;
1038
1039 ath_scan_channel_start(sc);
1040 break;
1041 case ATH_OFFCHANNEL_IDLE:
1042 if (!sc->offchannel.scan_req)
1043 return;
1044
1045 ath_scan_complete(sc, false);
1046 break;
1047 case ATH_OFFCHANNEL_ROC_START:
1048 if (sc->cur_chan != &sc->offchannel.chan)
1049 break;
1050
1051 sc->offchannel.state = ATH_OFFCHANNEL_ROC_WAIT;
1052 mod_timer(&sc->offchannel.timer,
1053 jiffies + sc->offchannel.duration);
1054 ieee80211_ready_on_channel(sc->hw);
1055 break;
1056 case ATH_OFFCHANNEL_ROC_DONE:
1057 ath_roc_complete(sc, false);
1058 break;
1059 default:
1060 break;
1061 }
1062 }
1063
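/* Perform the actual context switch: stop and flush the queues of the
 * current context, announce powersave to the AP, save the current TSF,
 * promote next_chan to cur_chan and reprogram the hardware if the channel
 * has really changed.
 */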
1064 void ath_chanctx_set_next(struct ath_softc *sc, bool force)
1065 {
1066 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1067 struct ath_chanctx *old_ctx;
1068 struct timespec ts;
1069 bool measure_time = false;
1070 bool send_ps = false;
1071 bool queues_stopped = false;
1072
1073 spin_lock_bh(&sc->chan_lock);
1074 if (!sc->next_chan) {
1075 spin_unlock_bh(&sc->chan_lock);
1076 return;
1077 }
1078
1079 if (!force && ath_chanctx_defer_switch(sc)) {
1080 spin_unlock_bh(&sc->chan_lock);
1081 return;
1082 }
1083
1084 ath_dbg(common, CHAN_CTX,
1085 "%s: current: %d MHz, next: %d MHz\n",
1086 __func__,
1087 sc->cur_chan->chandef.center_freq1,
1088 sc->next_chan->chandef.center_freq1);
1089
1090 if (sc->cur_chan != sc->next_chan) {
1091 ath_dbg(common, CHAN_CTX,
1092 "Stopping current chanctx: %d\n",
1093 sc->cur_chan->chandef.center_freq1);
1094 sc->cur_chan->stopped = true;
1095 spin_unlock_bh(&sc->chan_lock);
1096
1097 if (sc->next_chan == &sc->offchannel.chan) {
1098 getrawmonotonic(&ts);
1099 measure_time = true;
1100 }
1101
1102 ath9k_chanctx_stop_queues(sc, sc->cur_chan);
1103 queues_stopped = true;
1104
1105 __ath9k_flush(sc->hw, ~0, true);
1106
1107 if (ath_chanctx_send_ps_frame(sc, true))
1108 __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false);
1109
1110 send_ps = true;
1111 spin_lock_bh(&sc->chan_lock);
1112
1113 if (sc->cur_chan != &sc->offchannel.chan) {
1114 getrawmonotonic(&sc->cur_chan->tsf_ts);
1115 sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
1116 }
1117 }
1118 old_ctx = sc->cur_chan;
1119 sc->cur_chan = sc->next_chan;
1120 sc->cur_chan->stopped = false;
1121 sc->next_chan = NULL;
1122
1123 if (!sc->sched.offchannel_pending)
1124 sc->sched.offchannel_duration = 0;
1125
1126 if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE)
1127 sc->sched.state = ATH_CHANCTX_STATE_IDLE;
1128
1129 spin_unlock_bh(&sc->chan_lock);
1130
1131 if (sc->sc_ah->chip_fullsleep ||
1132 memcmp(&sc->cur_chandef, &sc->cur_chan->chandef,
1133 sizeof(sc->cur_chandef))) {
1134 ath_dbg(common, CHAN_CTX,
1135 "%s: Set channel %d MHz\n",
1136 __func__, sc->cur_chan->chandef.center_freq1);
1137 ath_set_channel(sc);
1138 if (measure_time)
1139 sc->sched.channel_switch_time =
1140 ath9k_hw_get_tsf_offset(&ts, NULL);
1141 /*
1142 * A reset will ensure that all queues are woken up,
1143 * so there is no need to awaken them again.
1144 */
1145 goto out;
1146 }
1147
1148 if (queues_stopped)
1149 ath9k_chanctx_wake_queues(sc, old_ctx);
1150 out:
1151 if (send_ps)
1152 ath_chanctx_send_ps_frame(sc, false);
1153
1154 ath_offchannel_channel_change(sc);
1155 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_SWITCH);
1156 }
1157
1158 static void ath_chanctx_work(struct work_struct *work)
1159 {
1160 struct ath_softc *sc = container_of(work, struct ath_softc,
1161 chanctx_work);
1162 mutex_lock(&sc->mutex);
1163 ath_chanctx_set_next(sc, false);
1164 mutex_unlock(&sc->mutex);
1165 }
1166
1167 void ath9k_offchannel_init(struct ath_softc *sc)
1168 {
1169 struct ath_chanctx *ctx;
1170 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1171 struct ieee80211_supported_band *sband;
1172 struct ieee80211_channel *chan;
1173 int i;
1174
1175 sband = &common->sbands[IEEE80211_BAND_2GHZ];
1176 if (!sband->n_channels)
1177 sband = &common->sbands[IEEE80211_BAND_5GHZ];
1178
1179 chan = &sband->channels[0];
1180
1181 ctx = &sc->offchannel.chan;
1182 INIT_LIST_HEAD(&ctx->vifs);
1183 ctx->txpower = ATH_TXPOWER_MAX;
1184 cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
1185
1186 for (i = 0; i < ARRAY_SIZE(ctx->acq); i++)
1187 INIT_LIST_HEAD(&ctx->acq[i]);
1188
1189 sc->offchannel.chan.offchannel = true;
1190 }
1191
1192 void ath9k_init_channel_context(struct ath_softc *sc)
1193 {
1194 INIT_WORK(&sc->chanctx_work, ath_chanctx_work);
1195
1196 setup_timer(&sc->offchannel.timer, ath_offchannel_timer,
1197 (unsigned long)sc);
1198 setup_timer(&sc->sched.timer, ath_chanctx_timer,
1199 (unsigned long)sc);
1200 }
1201
1202 void ath9k_deinit_channel_context(struct ath_softc *sc)
1203 {
1204 cancel_work_sync(&sc->chanctx_work);
1205 }
1206
1207 bool ath9k_is_chanctx_enabled(void)
1208 {
1209 return (ath9k_use_chanctx == 1);
1210 }
1211
1212 /********************/
1213 /* Queue management */
1214 /********************/
1215
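/* Stop the mac80211 queues that map to the given context: the dedicated
 * offchannel queue for the offchannel context, or the per-AC queues of a
 * regular context, plus the second-to-last hardware queue in AP mode.
 * ath9k_chanctx_wake_queues() below is the mirror operation.
 */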
1216 void ath9k_chanctx_stop_queues(struct ath_softc *sc, struct ath_chanctx *ctx)
1217 {
1218 struct ath_hw *ah = sc->sc_ah;
1219 int i;
1220
1221 if (ctx == &sc->offchannel.chan) {
1222 ieee80211_stop_queue(sc->hw,
1223 sc->hw->offchannel_tx_hw_queue);
1224 } else {
1225 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1226 ieee80211_stop_queue(sc->hw,
1227 ctx->hw_queue_base + i);
1228 }
1229
1230 if (ah->opmode == NL80211_IFTYPE_AP)
1231 ieee80211_stop_queue(sc->hw, sc->hw->queues - 2);
1232 }
1233
1234
1235 void ath9k_chanctx_wake_queues(struct ath_softc *sc, struct ath_chanctx *ctx)
1236 {
1237 struct ath_hw *ah = sc->sc_ah;
1238 int i;
1239
1240 if (ctx == &sc->offchannel.chan) {
1241 ieee80211_wake_queue(sc->hw,
1242 sc->hw->offchannel_tx_hw_queue);
1243 } else {
1244 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1245 ieee80211_wake_queue(sc->hw,
1246 ctx->hw_queue_base + i);
1247 }
1248
1249 if (ah->opmode == NL80211_IFTYPE_AP)
1250 ieee80211_wake_queue(sc->hw, sc->hw->queues - 2);
1251 }
1252
1253 /*****************/
1254 /* P2P Powersave */
1255 /*****************/
1256
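/* (Re)arm the generic hardware timer to fire at the next NoA transition,
 * keeping a margin of ATH_P2P_PS_STOP_TIME before the GO becomes absent.
 */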
1257 static void ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
1258 {
1259 struct ath_hw *ah = sc->sc_ah;
1260 s32 tsf, target_tsf;
1261
1262 if (!avp || !avp->noa.has_next_tsf)
1263 return;
1264
1265 ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
1266
1267 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1268
1269 target_tsf = avp->noa.next_tsf;
1270 if (!avp->noa.absent)
1271 target_tsf -= ATH_P2P_PS_STOP_TIME;
1272
1273 if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
1274 target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
1275
1276 ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
1277 }
1278
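/* Parse the NoA attribute from the vif's BSS configuration and (re)program
 * the P2P powersave timer. Only P2P client interfaces are handled here.
 */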
1279 static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
1280 {
1281 struct ath_vif *avp = (void *)vif->drv_priv;
1282 u32 tsf;
1283
1284 if (!sc->p2p_ps_timer)
1285 return;
1286
1287 if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
1288 return;
1289
1290 sc->p2p_ps_vif = avp;
1291 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1292 ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
1293 ath9k_update_p2p_ps_timer(sc, avp);
1294 }
1295
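/* Pick the CT window advertised in the NoA attribute: use the configured
 * opportunistic PS CT window if it fits within the quarter-beacon switch
 * delay, otherwise fall back to P2P_DEFAULT_CTWIN, or 0 if even that does
 * not fit.
 */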
1296 static u8 ath9k_get_ctwin(struct ath_softc *sc, struct ath_vif *avp)
1297 {
1298 struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
1299 u8 switch_time, ctwin;
1300
1301 /*
1302 * Channel switch in multi-channel mode is deferred
1303 * by a quarter beacon interval when handling
1304 * ATH_CHANCTX_EVENT_BEACON_PREPARE, so the P2P-GO
1305 * interface is guaranteed to be discoverable
1306 * for that duration after a TBTT.
1307 */
1308 switch_time = cur_conf->beacon_interval / 4;
1309
1310 ctwin = avp->vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
1311 if (ctwin && (ctwin < switch_time))
1312 return ctwin;
1313
1314 if (switch_time < P2P_DEFAULT_CTWIN)
1315 return 0;
1316
1317 return P2P_DEFAULT_CTWIN;
1318 }
1319
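/* Append a P2P Notice of Absence attribute to the beacon, with up to two
 * descriptors: a periodic or one-shot absence for multi-channel scheduling
 * and/or a one-shot absence covering a pending offchannel operation.
 */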
1320 void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp,
1321 struct sk_buff *skb)
1322 {
1323 static const u8 noa_ie_hdr[] = {
1324 WLAN_EID_VENDOR_SPECIFIC, /* type */
1325 0, /* length */
1326 0x50, 0x6f, 0x9a, /* WFA OUI */
1327 0x09, /* P2P subtype */
1328 0x0c, /* Notice of Absence */
1329 0x00, /* LSB of little-endian len */
1330 0x00, /* MSB of little-endian len */
1331 };
1332
1333 struct ieee80211_p2p_noa_attr *noa;
1334 int noa_len, noa_desc, i = 0;
1335 u8 *hdr;
1336
1337 if (!avp->offchannel_duration && !avp->noa_duration)
1338 return;
1339
1340 noa_desc = !!avp->offchannel_duration + !!avp->noa_duration;
1341 noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc;
1342
1343 hdr = skb_put(skb, sizeof(noa_ie_hdr));
1344 memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr));
1345 hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2;
1346 hdr[7] = noa_len;
1347
1348 noa = (void *) skb_put(skb, noa_len);
1349 memset(noa, 0, noa_len);
1350
1351 noa->index = avp->noa_index;
1352 noa->oppps_ctwindow = ath9k_get_ctwin(sc, avp);
1353
1354 if (avp->noa_duration) {
1355 if (avp->periodic_noa) {
1356 u32 interval = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
1357 noa->desc[i].count = 255;
1358 noa->desc[i].interval = cpu_to_le32(interval);
1359 } else {
1360 noa->desc[i].count = 1;
1361 }
1362
1363 noa->desc[i].start_time = cpu_to_le32(avp->noa_start);
1364 noa->desc[i].duration = cpu_to_le32(avp->noa_duration);
1365 i++;
1366 }
1367
1368 if (avp->offchannel_duration) {
1369 noa->desc[i].count = 1;
1370 noa->desc[i].start_time = cpu_to_le32(avp->offchannel_start);
1371 noa->desc[i].duration = cpu_to_le32(avp->offchannel_duration);
1372 }
1373 }
1374
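/* Generic-timer callback for P2P powersave: feed a TSF timer event into the
 * channel context scheduler (the generic timer is shared with it), re-arm
 * the NoA timer and put the GO's ath_node to sleep or wake it up, depending
 * on whether we are inside the advertised absence period.
 */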
1375 void ath9k_p2p_ps_timer(void *priv)
1376 {
1377 struct ath_softc *sc = priv;
1378 struct ath_vif *avp = sc->p2p_ps_vif;
1379 struct ieee80211_vif *vif;
1380 struct ieee80211_sta *sta;
1381 struct ath_node *an;
1382 u32 tsf;
1383
1384 del_timer_sync(&sc->sched.timer);
1385 ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer);
1386 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER);
1387
1388 if (!avp || avp->chanctx != sc->cur_chan)
1389 return;
1390
1391 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1392 if (!avp->noa.absent)
1393 tsf += ATH_P2P_PS_STOP_TIME;
1394
1395 if (!avp->noa.has_next_tsf ||
1396 avp->noa.next_tsf - tsf > BIT(31))
1397 ieee80211_update_p2p_noa(&avp->noa, tsf);
1398
1399 ath9k_update_p2p_ps_timer(sc, avp);
1400
1401 rcu_read_lock();
1402
1403 vif = avp->vif;
1404 sta = ieee80211_find_sta(vif, avp->bssid);
1405 if (!sta)
1406 goto out;
1407
1408 an = (void *) sta->drv_priv;
1409 if (an->sleeping == !!avp->noa.absent)
1410 goto out;
1411
1412 an->sleeping = avp->noa.absent;
1413 if (an->sleeping)
1414 ath_tx_aggr_sleep(sta, sc, an);
1415 else
1416 ath_tx_aggr_wakeup(sc, an);
1417
1418 out:
1419 rcu_read_unlock();
1420 }
1421
1422 void ath9k_p2p_bss_info_changed(struct ath_softc *sc,
1423 struct ieee80211_vif *vif)
1424 {
1425 unsigned long flags;
1426
1427 spin_lock_bh(&sc->sc_pcu_lock);
1428 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1429 if (!(sc->ps_flags & PS_BEACON_SYNC))
1430 ath9k_update_p2p_ps(sc, vif);
1431 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1432 spin_unlock_bh(&sc->sc_pcu_lock);
1433 }
1434
1435 void ath9k_p2p_beacon_sync(struct ath_softc *sc)
1436 {
1437 if (sc->p2p_ps_vif)
1438 ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
1439 }
1440
1441 void ath9k_p2p_remove_vif(struct ath_softc *sc,
1442 struct ieee80211_vif *vif)
1443 {
1444 struct ath_vif *avp = (void *)vif->drv_priv;
1445
1446 spin_lock_bh(&sc->sc_pcu_lock);
1447 if (avp == sc->p2p_ps_vif) {
1448 sc->p2p_ps_vif = NULL;
1449 ath9k_update_p2p_ps_timer(sc, NULL);
1450 }
1451 spin_unlock_bh(&sc->sc_pcu_lock);
1452 }
1453
1454 int ath9k_init_p2p(struct ath_softc *sc)
1455 {
1456 sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
1457 NULL, sc, AR_FIRST_NDP_TIMER);
1458 if (!sc->p2p_ps_timer)
1459 return -ENOMEM;
1460
1461 return 0;
1462 }
1463
1464 void ath9k_deinit_p2p(struct ath_softc *sc)
1465 {
1466 if (sc->p2p_ps_timer)
1467 ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
1468 }
1469
1470 #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */