ath9k: Allow user to change tx power when asked
[deliverable/linux.git] drivers/net/wireless/ath9k/xmit.c
1 /*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /*
18 * Implementation of transmit path.
19 */
20
21 #include "core.h"
22
23 #define BITS_PER_BYTE 8
24 #define OFDM_PLCP_BITS 22
25 #define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
26 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
27 #define L_STF 8
28 #define L_LTF 8
29 #define L_SIG 4
30 #define HT_SIG 8
31 #define HT_STF 4
32 #define HT_LTF(_ns) (4 * (_ns))
33 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
34 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
35 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
36 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
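/*
 * Note: "_ns" in the SYMBOL_TIME macros is the number of OFDM symbols,
 * not nanoseconds.  With the long guard interval a symbol lasts 4 us,
 * with the short (half) guard interval 3.6 us, so the conversions use
 * integer arithmetic with 3.6 us expressed as 18/5 us.
 */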
37
38 #define OFDM_SIFS_TIME 16
39
40 static u32 bits_per_symbol[][2] = {
41 /* 20MHz 40MHz */
42 { 26, 54 }, /* 0: BPSK */
43 { 52, 108 }, /* 1: QPSK 1/2 */
44 { 78, 162 }, /* 2: QPSK 3/4 */
45 { 104, 216 }, /* 3: 16-QAM 1/2 */
46 { 156, 324 }, /* 4: 16-QAM 3/4 */
47 { 208, 432 }, /* 5: 64-QAM 2/3 */
48 { 234, 486 }, /* 6: 64-QAM 3/4 */
49 { 260, 540 }, /* 7: 64-QAM 5/6 */
50 { 52, 108 }, /* 8: BPSK */
51 { 104, 216 }, /* 9: QPSK 1/2 */
52 { 156, 324 }, /* 10: QPSK 3/4 */
53 { 208, 432 }, /* 11: 16-QAM 1/2 */
54 { 312, 648 }, /* 12: 16-QAM 3/4 */
55 { 416, 864 }, /* 13: 64-QAM 2/3 */
56 { 468, 972 }, /* 14: 64-QAM 3/4 */
57 { 520, 1080 }, /* 15: 64-QAM 5/6 */
58 };
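/*
 * The table above is indexed by HT MCS (HT_RC_2_MCS of the h/w rate code):
 * rows 0-7 are the single-stream MCS 0-7, rows 8-15 the two-stream MCS 8-15.
 * Each entry is the number of data bits carried per OFDM symbol on a 20 MHz
 * and a 40 MHz channel; ath_pkt_duration() and ath_compute_num_delims()
 * use it to convert byte counts into symbol counts.
 */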
59
60 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
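/* IS_HT_RATE() relies on the HAL rate table encoding HT (MCS) rate codes
 * with bit 7 set; legacy CCK/OFDM rate codes leave it clear. */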
61
62 /*
63 * Insert a chain of ath_buf (descriptors) on a txq and
64 * assume the descriptors are already chained together by caller.
65 * NB: must be called with txq lock held
66 */
67
68 static void ath_tx_txqaddbuf(struct ath_softc *sc,
69 struct ath_txq *txq, struct list_head *head)
70 {
71 struct ath_hal *ah = sc->sc_ah;
72 struct ath_buf *bf;
73 /*
74 * Insert the frame on the outbound list and
75 * pass it on to the hardware.
76 */
77
78 if (list_empty(head))
79 return;
80
81 bf = list_first_entry(head, struct ath_buf, list);
82
83 list_splice_tail_init(head, &txq->axq_q);
84 txq->axq_depth++;
85 txq->axq_totalqueued++;
86 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
87
88 DPRINTF(sc, ATH_DBG_QUEUE,
89 "%s: txq depth = %d\n", __func__, txq->axq_depth);
90
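/*
 * If the h/w queue is idle (no link pointer yet), point TXDP at the first
 * buffer of this chain; otherwise patch the previous chain's last
 * descriptor (via *axq_link) to point at it, then (re)start tx DMA.
 */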
91 if (txq->axq_link == NULL) {
92 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
93 DPRINTF(sc, ATH_DBG_XMIT,
94 "%s: TXDP[%u] = %llx (%p)\n",
95 __func__, txq->axq_qnum,
96 ito64(bf->bf_daddr), bf->bf_desc);
97 } else {
98 *txq->axq_link = bf->bf_daddr;
99 DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
100 __func__,
101 txq->axq_qnum, txq->axq_link,
102 ito64(bf->bf_daddr), bf->bf_desc);
103 }
104 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
105 ath9k_hw_txstart(ah, txq->axq_qnum);
106 }
107
108 /* Get transmit rate index using rate in Kbps */
109
110 static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
111 {
112 int i;
113 int ndx = 0;
114
115 for (i = 0; i < rt->rateCount; i++) {
116 if (rt->info[i].rateKbps == rate) {
117 ndx = i;
118 break;
119 }
120 }
121
122 return ndx;
123 }
124
125 /* Check if it's okay to send out aggregates */
126
127 static int ath_aggr_query(struct ath_softc *sc,
128 struct ath_node *an, u8 tidno)
129 {
130 struct ath_atx_tid *tid;
131 tid = ATH_AN_2_TID(an, tidno);
132
133 if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
134 return 1;
135 else
136 return 0;
137 }
138
139 static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
140 {
141 enum ath9k_pkt_type htype;
142 __le16 fc;
143
144 fc = hdr->frame_control;
145
146 /* Calculate Atheros packet type from IEEE80211 packet header */
147
148 if (ieee80211_is_beacon(fc))
149 htype = ATH9K_PKT_TYPE_BEACON;
150 else if (ieee80211_is_probe_resp(fc))
151 htype = ATH9K_PKT_TYPE_PROBE_RESP;
152 else if (ieee80211_is_atim(fc))
153 htype = ATH9K_PKT_TYPE_ATIM;
154 else if (ieee80211_is_pspoll(fc))
155 htype = ATH9K_PKT_TYPE_PSPOLL;
156 else
157 htype = ATH9K_PKT_TYPE_NORMAL;
158
159 return htype;
160 }
161
162 static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
163 {
164 struct ieee80211_hdr *hdr;
165 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
166 struct ath_tx_info_priv *tx_info_priv;
167 __le16 fc;
168
169 hdr = (struct ieee80211_hdr *)skb->data;
170 fc = hdr->frame_control;
171
172 /* XXX: HACK! */
173 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
174
175 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
176 txctl->use_minrate = 1;
177 txctl->min_rate = tx_info_priv->min_rate;
178 } else if (ieee80211_is_data(fc)) {
179 if (ieee80211_is_nullfunc(fc) ||
180 /* Port Access Entity (IEEE 802.1X) */
181 (skb->protocol == cpu_to_be16(0x888E))) {
182 txctl->use_minrate = 1;
183 txctl->min_rate = tx_info_priv->min_rate;
184 }
185 if (is_multicast_ether_addr(hdr->addr1))
186 txctl->mcast_rate = tx_info_priv->min_rate;
187 }
188
189 }
190
191 /* This function will setup additional txctl information, mostly rate stuff */
192 /* FIXME: seqno, ps */
193 static int ath_tx_prepare(struct ath_softc *sc,
194 struct sk_buff *skb,
195 struct ath_tx_control *txctl)
196 {
197 struct ieee80211_hw *hw = sc->hw;
198 struct ieee80211_hdr *hdr;
199 struct ath_rc_series *rcs;
200 struct ath_txq *txq = NULL;
201 const struct ath9k_rate_table *rt;
202 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
203 struct ath_tx_info_priv *tx_info_priv;
204 int hdrlen;
205 u8 rix, antenna;
206 __le16 fc;
207 u8 *qc;
208
209 txctl->dev = sc;
210 hdr = (struct ieee80211_hdr *)skb->data;
211 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
212 fc = hdr->frame_control;
213
214 rt = sc->sc_currates;
215 BUG_ON(!rt);
216
217 /* Fill misc fields */
218
219 spin_lock_bh(&sc->node_lock);
220 txctl->an = ath_node_get(sc, hdr->addr1);
221 /* create a temp node, if the node is not there already */
222 if (!txctl->an)
223 txctl->an = ath_node_attach(sc, hdr->addr1, 0);
224 spin_unlock_bh(&sc->node_lock);
225
226 if (ieee80211_is_data_qos(fc)) {
227 qc = ieee80211_get_qos_ctl(hdr);
228 txctl->tidno = qc[0] & 0xf;
229 }
230
231 txctl->if_id = 0;
232 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
233
234         /* Always try at highest power possible unless the device
235 * was configured by the user to use another power. */
236 if (likely(sc->sc_config.txpowlimit == ATH_TXPOWER_MAX))
237 txctl->txpower = ATH_TXPOWER_MAX;
238 else
239 txctl->txpower = sc->sc_config.txpowlimit;
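/*
 * sc_config.txpowlimit is presumably updated from the mac80211
 * configuration path when the user asks for a specific power (the point
 * of the "allow user to change tx power" change); ATH_TXPOWER_MAX means
 * "no user limit".  The per-descriptor value is later clamped to 60 in
 * ath_tx_start_dma(), likely in 0.5 dBm units (i.e. 30 dBm).
 */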
240
241 /* Fill Key related fields */
242
243 txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
244 txctl->keyix = ATH9K_TXKEYIX_INVALID;
245
246 if (tx_info->control.hw_key) {
247 txctl->keyix = tx_info->control.hw_key->hw_key_idx;
248 txctl->frmlen += tx_info->control.hw_key->icv_len;
249
250 if (tx_info->control.hw_key->alg == ALG_WEP)
251 txctl->keytype = ATH9K_KEY_TYPE_WEP;
252 else if (tx_info->control.hw_key->alg == ALG_TKIP)
253 txctl->keytype = ATH9K_KEY_TYPE_TKIP;
254 else if (tx_info->control.hw_key->alg == ALG_CCMP)
255 txctl->keytype = ATH9K_KEY_TYPE_AES;
256 }
257
258 /* Fill packet type */
259
260 txctl->atype = get_hal_packet_type(hdr);
261
262 /* Fill qnum */
263
264 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
265 txctl->qnum = 0;
266 txq = sc->sc_cabq;
267 } else {
268 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
269 txq = &sc->sc_txq[txctl->qnum];
270 }
271 spin_lock_bh(&txq->axq_lock);
272
273 /* Try to avoid running out of descriptors */
274 if (txq->axq_depth >= (ATH_TXBUF - 20) &&
275 !(txctl->flags & ATH9K_TXDESC_CAB)) {
276 DPRINTF(sc, ATH_DBG_FATAL,
277 "%s: TX queue: %d is full, depth: %d\n",
278 __func__,
279 txctl->qnum,
280 txq->axq_depth);
281 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
282 txq->stopped = 1;
283 spin_unlock_bh(&txq->axq_lock);
284 return -1;
285 }
286
287 spin_unlock_bh(&txq->axq_lock);
288
289 /* Fill rate */
290
291 fill_min_rates(skb, txctl);
292
293 /* Fill flags */
294
295 txctl->flags |= ATH9K_TXDESC_CLRDMASK /* needed for crypto errors */
296 | ATH9K_TXDESC_INTREQ; /* Generate an interrupt */
297
298 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
299 txctl->flags |= ATH9K_TXDESC_NOACK;
300
301 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
302 txctl->flags |= ATH9K_TXDESC_RTSENA;
303
304 /*
305 * Setup for rate calculations.
306 */
307
308 /* XXX: HACK! */
309 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
310 rcs = tx_info_priv->rcs;
311
312 if (ieee80211_is_data(fc) && !txctl->use_minrate) {
313
314 /* Enable HT only for DATA frames and not for EAPOL */
315 /* XXX why AMPDU only?? */
316 txctl->ht = (hw->conf.ht.enabled &&
317 (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
318
319 if (is_multicast_ether_addr(hdr->addr1)) {
320 rcs[0].rix = (u8)
321 ath_tx_findindex(rt, txctl->mcast_rate);
322
323 /*
324 * mcast packets are not re-tried.
325 */
326 rcs[0].tries = 1;
327 }
328 /* For HT capable stations, we save tidno for later use.
329 * We also override seqno set by upper layer with the one
330 * in tx aggregation state.
331 *
332                  * First, the fragmentation state is determined.
333 * If fragmentation is on, the sequence number is
334 * not overridden, since it has been
335 * incremented by the fragmentation routine.
336 */
337 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
338 txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
339 struct ath_atx_tid *tid;
340
341 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
342
343 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
344 IEEE80211_SEQ_SEQ_SHIFT);
345 txctl->seqno = tid->seq_next;
346 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
347 }
348 } else {
349 /* for management and control frames,
350 * or for NULL and EAPOL frames */
351 if (txctl->min_rate)
352 rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
353 else
354 rcs[0].rix = 0;
355 rcs[0].tries = ATH_MGT_TXMAXTRY;
356 }
357 rix = rcs[0].rix;
358
359 if (ieee80211_has_morefrags(fc) ||
360 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
361 /*
362 ** Force hardware to use computed duration for next
363 ** fragment by disabling multi-rate retry, which
364 ** updates duration based on the multi-rate
365 ** duration table.
366 */
367 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
368 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
369 /* reset tries but keep rate index */
370 rcs[0].tries = ATH_TXMAXTRY;
371 }
372
373 if (is_multicast_ether_addr(hdr->addr1)) {
374 antenna = sc->sc_mcastantenna + 1;
375 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
376 }
377
378 return 0;
379 }
380
381 /* To complete a chain of buffers associated with a frame */
382
383 static void ath_tx_complete_buf(struct ath_softc *sc,
384 struct ath_buf *bf,
385 struct list_head *bf_q,
386 int txok, int sendbar)
387 {
388 struct sk_buff *skb = bf->bf_mpdu;
389 struct ath_xmit_status tx_status;
390
391 /*
392 * Set retry information.
393 * NB: Don't use the information in the descriptor, because the frame
394 * could be software retried.
395 */
396 tx_status.retries = bf->bf_retries;
397 tx_status.flags = 0;
398
399 if (sendbar)
400 tx_status.flags = ATH_TX_BAR;
401
402 if (!txok) {
403 tx_status.flags |= ATH_TX_ERROR;
404
405 if (bf_isxretried(bf))
406 tx_status.flags |= ATH_TX_XRETRY;
407 }
408 /* Unmap this frame */
409 pci_unmap_single(sc->pdev,
410 bf->bf_dmacontext,
411 skb->len,
412 PCI_DMA_TODEVICE);
413 /* complete this frame */
414 ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
415
416 /*
417 * Return the list of ath_buf of this mpdu to free queue
418 */
419 spin_lock_bh(&sc->sc_txbuflock);
420 list_splice_tail_init(bf_q, &sc->sc_txbuf);
421 spin_unlock_bh(&sc->sc_txbuflock);
422 }
423
424 /*
425 * queue up a dest/ac pair for tx scheduling
426 * NB: must be called with txq lock held
427 */
428
429 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
430 {
431 struct ath_atx_ac *ac = tid->ac;
432
433 /*
434 * if tid is paused, hold off
435 */
436 if (tid->paused)
437 return;
438
439 /*
440          * add tid to ac at most once
441 */
442 if (tid->sched)
443 return;
444
445 tid->sched = true;
446 list_add_tail(&tid->list, &ac->tid_q);
447
448 /*
449          * add node ac to txq at most once
450 */
451 if (ac->sched)
452 return;
453
454 ac->sched = true;
455 list_add_tail(&ac->list, &txq->axq_acq);
456 }
457
458 /* pause a tid */
459
460 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
461 {
462 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
463
464 spin_lock_bh(&txq->axq_lock);
465
466 tid->paused++;
467
468 spin_unlock_bh(&txq->axq_lock);
469 }
470
471 /* resume a tid and schedule aggregate */
472
473 void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
474 {
475 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
476
477 ASSERT(tid->paused > 0);
478 spin_lock_bh(&txq->axq_lock);
479
480 tid->paused--;
481
482 if (tid->paused > 0)
483 goto unlock;
484
485 if (list_empty(&tid->buf_q))
486 goto unlock;
487
488 /*
489 * Add this TID to scheduler and try to send out aggregates
490 */
491 ath_tx_queue_tid(txq, tid);
492 ath_txq_schedule(sc, txq);
493 unlock:
494 spin_unlock_bh(&txq->axq_lock);
495 }
496
497 /* Compute the number of bad frames */
498
499 static int ath_tx_num_badfrms(struct ath_softc *sc,
500 struct ath_buf *bf, int txok)
501 {
502 struct ath_node *an = bf->bf_node;
503 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
504 struct ath_buf *bf_last = bf->bf_lastbf;
505 struct ath_desc *ds = bf_last->bf_desc;
506 u16 seq_st = 0;
507 u32 ba[WME_BA_BMP_SIZE >> 5];
508 int ba_index;
509 int nbad = 0;
510 int isaggr = 0;
511
512 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
513 return 0;
514
515 isaggr = bf_isaggr(bf);
516 if (isaggr) {
517 seq_st = ATH_DS_BA_SEQ(ds);
518 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
519 }
520
521 while (bf) {
522 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
523 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
524 nbad++;
525
526 bf = bf->bf_next;
527 }
528
529 return nbad;
530 }
531
532 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
533 {
534 struct sk_buff *skb;
535 struct ieee80211_hdr *hdr;
536
537 bf->bf_state.bf_type |= BUF_RETRY;
538 bf->bf_retries++;
539
540 skb = bf->bf_mpdu;
541 hdr = (struct ieee80211_hdr *)skb->data;
542 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
543 }
544
545 /* Update block ack window */
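/*
 * tid->tx_buf[] is a circular window of ATH_TID_MAX_BUFS slots starting at
 * baw_head (sequence number seq_start).  ATH_BA_INDEX() gives the offset of
 * a sequence number from seq_start; completing a frame clears its slot, and
 * the window then slides past any leading completed slots.
 */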
546
547 static void ath_tx_update_baw(struct ath_softc *sc,
548 struct ath_atx_tid *tid, int seqno)
549 {
550 int index, cindex;
551
552 index = ATH_BA_INDEX(tid->seq_start, seqno);
553 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
554
555 tid->tx_buf[cindex] = NULL;
556
557 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
558 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
559 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
560 }
561 }
562
563 /*
564 * ath_pkt_dur - compute packet duration (NB: not NAV)
565 *
566 * rix - rate index
567 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
568 * width - 0 for 20 MHz, 1 for 40 MHz
569  * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
570 */
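/*
 * Worked example (using bits_per_symbol above): a 1500-byte MPDU at MCS 7,
 * 20 MHz, long GI:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS = 12022
 *   nsymbits = bits_per_symbol[7][0]     = 260
 *   nsymbols = ceil(12022 / 260)         = 47  ->  47 * 4 us = 188 us
 *   + L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us
 *   total duration                       = 224 us
 */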
571
572 static u32 ath_pkt_duration(struct ath_softc *sc,
573 u8 rix,
574 struct ath_buf *bf,
575 int width,
576 int half_gi,
577 bool shortPreamble)
578 {
579 const struct ath9k_rate_table *rt = sc->sc_currates;
580 u32 nbits, nsymbits, duration, nsymbols;
581 u8 rc;
582 int streams, pktlen;
583
584 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
585 rc = rt->info[rix].rateCode;
586
587 /*
588 * for legacy rates, use old function to compute packet duration
589 */
590 if (!IS_HT_RATE(rc))
591 return ath9k_hw_computetxtime(sc->sc_ah,
592 rt,
593 pktlen,
594 rix,
595 shortPreamble);
596 /*
597 * find number of symbols: PLCP + data
598 */
599 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
600 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
601 nsymbols = (nbits + nsymbits - 1) / nsymbits;
602
603 if (!half_gi)
604 duration = SYMBOL_TIME(nsymbols);
605 else
606 duration = SYMBOL_TIME_HALFGI(nsymbols);
607
608 /*
609          * add up duration for legacy/ht training and signal fields
610 */
611 streams = HT_RC_2_STREAMS(rc);
612 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
613 return duration;
614 }
615
616 /* Rate module function to set rate related fields in tx descriptor */
617
618 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
619 {
620 struct ath_hal *ah = sc->sc_ah;
621 const struct ath9k_rate_table *rt;
622 struct ath_desc *ds = bf->bf_desc;
623 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
624 struct ath9k_11n_rate_series series[4];
625 int i, flags, rtsctsena = 0, dynamic_mimops = 0;
626 u32 ctsduration = 0;
627 u8 rix = 0, cix, ctsrate = 0;
628 u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
629 struct ath_node *an = (struct ath_node *) bf->bf_node;
630
631 /*
632 * get the cix for the lowest valid rix.
633 */
634 rt = sc->sc_currates;
635 for (i = 4; i--;) {
636 if (bf->bf_rcs[i].tries) {
637 rix = bf->bf_rcs[i].rix;
638 break;
639 }
640 }
641 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
642 cix = rt->info[rix].controlRate;
643
644 /*
645 * If 802.11g protection is enabled, determine whether
646 * to use RTS/CTS or just CTS. Note that this is only
647 * done for OFDM/HT unicast frames.
648 */
649 if (sc->sc_protmode != PROT_M_NONE &&
650 (rt->info[rix].phy == PHY_OFDM ||
651 rt->info[rix].phy == PHY_HT) &&
652 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
653 if (sc->sc_protmode == PROT_M_RTSCTS)
654 flags = ATH9K_TXDESC_RTSENA;
655 else if (sc->sc_protmode == PROT_M_CTSONLY)
656 flags = ATH9K_TXDESC_CTSENA;
657
658 cix = rt->info[sc->sc_protrix].controlRate;
659 rtsctsena = 1;
660 }
661
662 /* For 11n, the default behavior is to enable RTS for
663 * hw retried frames. We enable the global flag here and
664 * let rate series flags determine which rates will actually
665 * use RTS.
666 */
667 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
668 BUG_ON(!an);
669 /*
670 * 802.11g protection not needed, use our default behavior
671 */
672 if (!rtsctsena)
673 flags = ATH9K_TXDESC_RTSENA;
674 /*
675 * For dynamic MIMO PS, RTS needs to precede the first aggregate
676                  * and the second aggregate should not have any protection at all.
677 */
678 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
679 if (!bf_isaggrburst(bf)) {
680 flags = ATH9K_TXDESC_RTSENA;
681 dynamic_mimops = 1;
682 } else {
683 flags = 0;
684 }
685 }
686 }
687
688 /*
689 * Set protection if aggregate protection on
690 */
691 if (sc->sc_config.ath_aggr_prot &&
692 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
693 flags = ATH9K_TXDESC_RTSENA;
694 cix = rt->info[sc->sc_protrix].controlRate;
695 rtsctsena = 1;
696 }
697
698 /*
699 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
700 */
701 if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
702 /*
703 * Ensure that in the case of SM Dynamic power save
704 * while we are bursting the second aggregate the
705 * RTS is cleared.
706 */
707 flags &= ~(ATH9K_TXDESC_RTSENA);
708 }
709
710 /*
711 * CTS transmit rate is derived from the transmit rate
712 * by looking in the h/w rate table. We must also factor
713 * in whether or not a short preamble is to be used.
714 */
715 /* NB: cix is set above where RTS/CTS is enabled */
716 BUG_ON(cix == 0xff);
717 ctsrate = rt->info[cix].rateCode |
718 (bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
719
720 /*
721 * Setup HAL rate series
722 */
723 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
724
725 for (i = 0; i < 4; i++) {
726 if (!bf->bf_rcs[i].tries)
727 continue;
728
729 rix = bf->bf_rcs[i].rix;
730
731 series[i].Rate = rt->info[rix].rateCode |
732 (bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);
733
734 series[i].Tries = bf->bf_rcs[i].tries;
735
736 series[i].RateFlags = (
737 (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
738 ATH9K_RATESERIES_RTS_CTS : 0) |
739 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
740 ATH9K_RATESERIES_2040 : 0) |
741 ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
742 ATH9K_RATESERIES_HALFGI : 0);
743
744 series[i].PktDuration = ath_pkt_duration(
745 sc, rix, bf,
746 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
747 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
748 bf_isshpreamble(bf));
749
750 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
751 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
752 /*
753 * When sending to an HT node that has enabled static
754 * SM/MIMO power save, send at single stream rates but
755 * use maximum allowed transmit chains per user,
756 * hardware, regulatory, or country limits for
757 * better range.
758 */
759 series[i].ChSel = sc->sc_tx_chainmask;
760 } else {
761 if (bf_isht(bf))
762 series[i].ChSel =
763 ath_chainmask_sel_logic(sc, an);
764 else
765 series[i].ChSel = sc->sc_tx_chainmask;
766 }
767
768 if (rtsctsena)
769 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
770
771 /*
772 * Set RTS for all rates if node is in dynamic powersave
773 * mode and we are using dual stream rates.
774 */
775 if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
776 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
777 }
778
779 /*
780 * For non-HT devices, calculate RTS/CTS duration in software
781 * and disable multi-rate retry.
782 */
783 if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
784 /*
785 * Compute the transmit duration based on the frame
786 * size and the size of an ACK frame. We call into the
787 * HAL to do the computation since it depends on the
788 * characteristics of the actual PHY being used.
789 *
790 * NB: CTS is assumed the same size as an ACK so we can
791 * use the precalculated ACK durations.
792 */
793 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
794 ctsduration += bf_isshpreamble(bf) ?
795 rt->info[cix].spAckDuration :
796 rt->info[cix].lpAckDuration;
797 }
798
799 ctsduration += series[0].PktDuration;
800
801 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
802 ctsduration += bf_isshpreamble(bf) ?
803 rt->info[rix].spAckDuration :
804 rt->info[rix].lpAckDuration;
805 }
806
807 /*
808 * Disable multi-rate retry when using RTS/CTS by clearing
809 * series 1, 2 and 3.
810 */
811 memset(&series[1], 0, sizeof(struct ath9k_11n_rate_series) * 3);
812 }
813
814 /*
815 * set dur_update_en for l-sig computation except for PS-Poll frames
816 */
817 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
818 !bf_ispspoll(bf),
819 ctsrate,
820 ctsduration,
821 series, 4, flags);
822 if (sc->sc_config.ath_aggr_prot && flags)
823 ath9k_hw_set11n_burstduration(ah, ds, 8192);
824 }
825
826 /*
827 * Function to send a normal HT (non-AMPDU) frame
828 * NB: must be called with txq lock held
829 */
830
831 static int ath_tx_send_normal(struct ath_softc *sc,
832 struct ath_txq *txq,
833 struct ath_atx_tid *tid,
834 struct list_head *bf_head)
835 {
836 struct ath_buf *bf;
837 struct sk_buff *skb;
838 struct ieee80211_tx_info *tx_info;
839 struct ath_tx_info_priv *tx_info_priv;
840
841 BUG_ON(list_empty(bf_head));
842
843 bf = list_first_entry(bf_head, struct ath_buf, list);
844 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
845
846 skb = (struct sk_buff *)bf->bf_mpdu;
847 tx_info = IEEE80211_SKB_CB(skb);
848
849 /* XXX: HACK! */
850 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
851 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
852
853 /* update starting sequence number for subsequent ADDBA request */
854 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
855
856 /* Queue to h/w without aggregation */
857 bf->bf_nframes = 1;
858 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
859 ath_buf_set_rate(sc, bf);
860 ath_tx_txqaddbuf(sc, txq, bf_head);
861
862 return 0;
863 }
864
865 /* flush tid's software queue and send frames as non-ampdu's */
866
867 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
868 {
869 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
870 struct ath_buf *bf;
871 struct list_head bf_head;
872 INIT_LIST_HEAD(&bf_head);
873
874 ASSERT(tid->paused > 0);
875 spin_lock_bh(&txq->axq_lock);
876
877 tid->paused--;
878
879 if (tid->paused > 0) {
880 spin_unlock_bh(&txq->axq_lock);
881 return;
882 }
883
884 while (!list_empty(&tid->buf_q)) {
885 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
886 ASSERT(!bf_isretried(bf));
887 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
888 ath_tx_send_normal(sc, txq, tid, &bf_head);
889 }
890
891 spin_unlock_bh(&txq->axq_lock);
892 }
893
894 /* Completion routine of an aggregate */
895
896 static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
897 struct ath_txq *txq,
898 struct ath_buf *bf,
899 struct list_head *bf_q,
900 int txok)
901 {
902 struct ath_node *an = bf->bf_node;
903 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
904 struct ath_buf *bf_last = bf->bf_lastbf;
905 struct ath_desc *ds = bf_last->bf_desc;
906 struct ath_buf *bf_next, *bf_lastq = NULL;
907 struct list_head bf_head, bf_pending;
908 u16 seq_st = 0;
909 u32 ba[WME_BA_BMP_SIZE >> 5];
910 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
911 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
912
913 isaggr = bf_isaggr(bf);
914 if (isaggr) {
915 if (txok) {
916 if (ATH_DS_TX_BA(ds)) {
917 /*
918 * extract starting sequence and
919 * block-ack bitmap
920 */
921 seq_st = ATH_DS_BA_SEQ(ds);
922 memcpy(ba,
923 ATH_DS_BA_BITMAP(ds),
924 WME_BA_BMP_SIZE >> 3);
925 } else {
926 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
927
928 /*
929 * AR5416 can become deaf/mute when BA
930 * issue happens. Chip needs to be reset.
931                                  * But AP code may have synchronization issues
932                                  * when performing an internal reset in this routine.
933 * Only enable reset in STA mode for now.
934 */
935 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
936 needreset = 1;
937 }
938 } else {
939 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
940 }
941 }
942
943 INIT_LIST_HEAD(&bf_pending);
944 INIT_LIST_HEAD(&bf_head);
945
946 while (bf) {
947 txfail = txpending = 0;
948 bf_next = bf->bf_next;
949
950 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
951 /* transmit completion, subframe is
952 * acked by block ack */
953 } else if (!isaggr && txok) {
954 /* transmit completion */
955 } else {
956
957 if (!tid->cleanup_inprogress && !isnodegone &&
958 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
959 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
960 ath_tx_set_retry(sc, bf);
961 txpending = 1;
962 } else {
963 bf->bf_state.bf_type |= BUF_XRETRY;
964 txfail = 1;
965 sendbar = 1;
966 }
967 } else {
968 /*
969 * cleanup in progress, just fail
970 * the un-acked sub-frames
971 */
972 txfail = 1;
973 }
974 }
975 /*
976 * Remove ath_buf's of this sub-frame from aggregate queue.
977 */
978 if (bf_next == NULL) { /* last subframe in the aggregate */
979 ASSERT(bf->bf_lastfrm == bf_last);
980
981 /*
982 * The last descriptor of the last sub frame could be
983 * a holding descriptor for h/w. If that's the case,
984 * bf->bf_lastfrm won't be in the bf_q.
985 * Make sure we handle bf_q properly here.
986 */
987
988 if (!list_empty(bf_q)) {
989 bf_lastq = list_entry(bf_q->prev,
990 struct ath_buf, list);
991 list_cut_position(&bf_head,
992 bf_q, &bf_lastq->list);
993 } else {
994 /*
995 * XXX: if the last subframe only has one
996 * descriptor which is also being used as
997 * a holding descriptor. Then the ath_buf
998 * is not in the bf_q at all.
999 */
1000 INIT_LIST_HEAD(&bf_head);
1001 }
1002 } else {
1003 ASSERT(!list_empty(bf_q));
1004 list_cut_position(&bf_head,
1005 bf_q, &bf->bf_lastfrm->list);
1006 }
1007
1008 if (!txpending) {
1009 /*
1010 * complete the acked-ones/xretried ones; update
1011 * block-ack window
1012 */
1013 spin_lock_bh(&txq->axq_lock);
1014 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1015 spin_unlock_bh(&txq->axq_lock);
1016
1017 /* complete this sub-frame */
1018 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
1019 } else {
1020 /*
1021 * retry the un-acked ones
1022 */
1023 /*
1024                          * XXX: if the last descriptor is a holding descriptor,
1025                          * then in order to requeue the frame to the software
1026                          * queue we need to allocate a new descriptor and
1027                          * copy the contents of the holding descriptor to it.
1028 */
1029 if (bf->bf_next == NULL &&
1030 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
1031 struct ath_buf *tbf;
1032
1033 /* allocate new descriptor */
1034 spin_lock_bh(&sc->sc_txbuflock);
1035 ASSERT(!list_empty((&sc->sc_txbuf)));
1036 tbf = list_first_entry(&sc->sc_txbuf,
1037 struct ath_buf, list);
1038 list_del(&tbf->list);
1039 spin_unlock_bh(&sc->sc_txbuflock);
1040
1041 ATH_TXBUF_RESET(tbf);
1042
1043 /* copy descriptor content */
1044 tbf->bf_mpdu = bf_last->bf_mpdu;
1045 tbf->bf_node = bf_last->bf_node;
1046 tbf->bf_buf_addr = bf_last->bf_buf_addr;
1047 *(tbf->bf_desc) = *(bf_last->bf_desc);
1048
1049 /* link it to the frame */
1050 if (bf_lastq) {
1051 bf_lastq->bf_desc->ds_link =
1052 tbf->bf_daddr;
1053 bf->bf_lastfrm = tbf;
1054 ath9k_hw_cleartxdesc(sc->sc_ah,
1055 bf->bf_lastfrm->bf_desc);
1056 } else {
1057 tbf->bf_state = bf_last->bf_state;
1058 tbf->bf_lastfrm = tbf;
1059 ath9k_hw_cleartxdesc(sc->sc_ah,
1060 tbf->bf_lastfrm->bf_desc);
1061
1062 /* copy the DMA context */
1063 tbf->bf_dmacontext =
1064 bf_last->bf_dmacontext;
1065 }
1066 list_add_tail(&tbf->list, &bf_head);
1067 } else {
1068 /*
1069 * Clear descriptor status words for
1070 * software retry
1071 */
1072 ath9k_hw_cleartxdesc(sc->sc_ah,
1073 bf->bf_lastfrm->bf_desc);
1074 }
1075
1076 /*
1077 * Put this buffer to the temporary pending
1078 * queue to retain ordering
1079 */
1080 list_splice_tail_init(&bf_head, &bf_pending);
1081 }
1082
1083 bf = bf_next;
1084 }
1085
1086 /*
1087          * The node is already gone; there is no more association
1088          * with it. The node might have been freed, and any node
1089          * access can result in a panic. Note that tid is part of
1090          * the node.
1091 */
1092 if (isnodegone)
1093 return;
1094
1095 if (tid->cleanup_inprogress) {
1096 /* check to see if we're done with cleaning the h/w queue */
1097 spin_lock_bh(&txq->axq_lock);
1098
1099 if (tid->baw_head == tid->baw_tail) {
1100 tid->addba_exchangecomplete = 0;
1101 tid->addba_exchangeattempts = 0;
1102 spin_unlock_bh(&txq->axq_lock);
1103
1104 tid->cleanup_inprogress = false;
1105
1106 /* send buffered frames as singles */
1107 ath_tx_flush_tid(sc, tid);
1108 } else
1109 spin_unlock_bh(&txq->axq_lock);
1110
1111 return;
1112 }
1113
1114 /*
1115 * prepend un-acked frames to the beginning of the pending frame queue
1116 */
1117 if (!list_empty(&bf_pending)) {
1118 spin_lock_bh(&txq->axq_lock);
1119                 /* Note: we _prepend_, we do _not_ add to
1120                  * the end of the queue! */
1121 list_splice(&bf_pending, &tid->buf_q);
1122 ath_tx_queue_tid(txq, tid);
1123 spin_unlock_bh(&txq->axq_lock);
1124 }
1125
1126 if (needreset)
1127 ath_reset(sc, false);
1128
1129 return;
1130 }
1131
1132 /* Process completed xmit descriptors from the specified queue */
1133
1134 static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1135 {
1136 struct ath_hal *ah = sc->sc_ah;
1137 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1138 struct list_head bf_head;
1139 struct ath_desc *ds, *tmp_ds;
1140 struct sk_buff *skb;
1141 struct ieee80211_tx_info *tx_info;
1142 struct ath_tx_info_priv *tx_info_priv;
1143 int nacked, txok, nbad = 0, isrifs = 0;
1144 int status;
1145
1146 DPRINTF(sc, ATH_DBG_QUEUE,
1147 "%s: tx queue %d (%x), link %p\n", __func__,
1148 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1149 txq->axq_link);
1150
1151 nacked = 0;
1152 for (;;) {
1153 spin_lock_bh(&txq->axq_lock);
1154 if (list_empty(&txq->axq_q)) {
1155 txq->axq_link = NULL;
1156 txq->axq_linkbuf = NULL;
1157 spin_unlock_bh(&txq->axq_lock);
1158 break;
1159 }
1160 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1161
1162 /*
1163 * There is a race condition that a BH gets scheduled
1164                  * after sw writes TxE and before hw re-loads the last
1165 * descriptor to get the newly chained one.
1166 * Software must keep the last DONE descriptor as a
1167 * holding descriptor - software does so by marking
1168 * it with the STALE flag.
1169 */
1170 bf_held = NULL;
1171 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
1172 bf_held = bf;
1173 if (list_is_last(&bf_held->list, &txq->axq_q)) {
1174 /* FIXME:
1175 * The holding descriptor is the last
1176 * descriptor in queue. It's safe to remove
1177 * the last holding descriptor in BH context.
1178 */
1179 spin_unlock_bh(&txq->axq_lock);
1180 break;
1181 } else {
1182 /* Lets work with the next buffer now */
1183 bf = list_entry(bf_held->list.next,
1184 struct ath_buf, list);
1185 }
1186 }
1187
1188 lastbf = bf->bf_lastbf;
1189                 ds = lastbf->bf_desc;    /* NB: last descriptor */
1190
1191 status = ath9k_hw_txprocdesc(ah, ds);
1192 if (status == -EINPROGRESS) {
1193 spin_unlock_bh(&txq->axq_lock);
1194 break;
1195 }
1196 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1197 txq->axq_lastdsWithCTS = NULL;
1198 if (ds == txq->axq_gatingds)
1199 txq->axq_gatingds = NULL;
1200
1201 /*
1202 * Remove ath_buf's of the same transmit unit from txq,
1203 * however leave the last descriptor back as the holding
1204 * descriptor for hw.
1205 */
1206 lastbf->bf_status |= ATH_BUFSTATUS_STALE;
1207 INIT_LIST_HEAD(&bf_head);
1208
1209 if (!list_is_singular(&lastbf->list))
1210 list_cut_position(&bf_head,
1211 &txq->axq_q, lastbf->list.prev);
1212
1213 txq->axq_depth--;
1214
1215 if (bf_isaggr(bf))
1216 txq->axq_aggr_depth--;
1217
1218 txok = (ds->ds_txstat.ts_status == 0);
1219
1220 spin_unlock_bh(&txq->axq_lock);
1221
1222 if (bf_held) {
1223 list_del(&bf_held->list);
1224 spin_lock_bh(&sc->sc_txbuflock);
1225 list_add_tail(&bf_held->list, &sc->sc_txbuf);
1226 spin_unlock_bh(&sc->sc_txbuflock);
1227 }
1228
1229 if (!bf_isampdu(bf)) {
1230 /*
1231 * This frame is sent out as a single frame.
1232 * Use hardware retry status for this frame.
1233 */
1234 bf->bf_retries = ds->ds_txstat.ts_longretry;
1235 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1236 bf->bf_state.bf_type |= BUF_XRETRY;
1237 nbad = 0;
1238 } else {
1239 nbad = ath_tx_num_badfrms(sc, bf, txok);
1240 }
1241 skb = bf->bf_mpdu;
1242 tx_info = IEEE80211_SKB_CB(skb);
1243
1244 /* XXX: HACK! */
1245 tx_info_priv = (struct ath_tx_info_priv *) tx_info->control.vif;
1246 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1247 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1248 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1249 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
1250 if (ds->ds_txstat.ts_status == 0)
1251 nacked++;
1252
1253 if (bf_isdata(bf)) {
1254 if (isrifs)
1255 tmp_ds = bf->bf_rifslast->bf_desc;
1256 else
1257 tmp_ds = ds;
1258 memcpy(&tx_info_priv->tx,
1259 &tmp_ds->ds_txstat,
1260 sizeof(tx_info_priv->tx));
1261 tx_info_priv->n_frames = bf->bf_nframes;
1262 tx_info_priv->n_bad_frames = nbad;
1263 }
1264 }
1265
1266 /*
1267 * Complete this transmit unit
1268 */
1269 if (bf_isampdu(bf))
1270 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1271 else
1272 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
1273
1274 /* Wake up mac80211 queue */
1275
1276 spin_lock_bh(&txq->axq_lock);
1277 if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
1278 (ATH_TXBUF - 20)) {
1279 int qnum;
1280 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1281 if (qnum != -1) {
1282 ieee80211_wake_queue(sc->hw, qnum);
1283 txq->stopped = 0;
1284 }
1285
1286 }
1287
1288 /*
1289 * schedule any pending packets if aggregation is enabled
1290 */
1291 if (sc->sc_flags & SC_OP_TXAGGR)
1292 ath_txq_schedule(sc, txq);
1293 spin_unlock_bh(&txq->axq_lock);
1294 }
1295 return nacked;
1296 }
1297
1298 static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1299 {
1300 struct ath_hal *ah = sc->sc_ah;
1301
1302 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1303 DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
1304 __func__, txq->axq_qnum,
1305 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
1306 }
1307
1308 /* Drain only the data queues */
1309
1310 static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1311 {
1312 struct ath_hal *ah = sc->sc_ah;
1313 int i;
1314 int npend = 0;
1315
1316 /* XXX return value */
1317 if (!(sc->sc_flags & SC_OP_INVALID)) {
1318 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1319 if (ATH_TXQ_SETUP(sc, i)) {
1320 ath_tx_stopdma(sc, &sc->sc_txq[i]);
1321
1322 /* The TxDMA may not really be stopped.
1323 * Double check the hal tx pending count */
1324 npend += ath9k_hw_numtxpending(ah,
1325 sc->sc_txq[i].axq_qnum);
1326 }
1327 }
1328 }
1329
1330 if (npend) {
1331 int status;
1332
1333 /* TxDMA not stopped, reset the hal */
1334 DPRINTF(sc, ATH_DBG_XMIT,
1335 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1336
1337 spin_lock_bh(&sc->sc_resetlock);
1338 if (!ath9k_hw_reset(ah,
1339 sc->sc_ah->ah_curchan,
1340 sc->sc_ht_info.tx_chan_width,
1341 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1342 sc->sc_ht_extprotspacing, true, &status)) {
1343
1344 DPRINTF(sc, ATH_DBG_FATAL,
1345 "%s: unable to reset hardware; hal status %u\n",
1346 __func__,
1347 status);
1348 }
1349 spin_unlock_bh(&sc->sc_resetlock);
1350 }
1351
1352 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1353 if (ATH_TXQ_SETUP(sc, i))
1354 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
1355 }
1356 }
1357
1358 /* Add a sub-frame to block ack window */
1359
1360 static void ath_tx_addto_baw(struct ath_softc *sc,
1361 struct ath_atx_tid *tid,
1362 struct ath_buf *bf)
1363 {
1364 int index, cindex;
1365
1366 if (bf_isretried(bf))
1367 return;
1368
1369 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1370 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1371
1372 ASSERT(tid->tx_buf[cindex] == NULL);
1373 tid->tx_buf[cindex] = bf;
1374
1375 if (index >= ((tid->baw_tail - tid->baw_head) &
1376 (ATH_TID_MAX_BUFS - 1))) {
1377 tid->baw_tail = cindex;
1378 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1379 }
1380 }
1381
1382 /*
1383 * Function to send an A-MPDU
1384 * NB: must be called with txq lock held
1385 */
1386
1387 static int ath_tx_send_ampdu(struct ath_softc *sc,
1388 struct ath_txq *txq,
1389 struct ath_atx_tid *tid,
1390 struct list_head *bf_head,
1391 struct ath_tx_control *txctl)
1392 {
1393 struct ath_buf *bf;
1394 struct sk_buff *skb;
1395 struct ieee80211_tx_info *tx_info;
1396 struct ath_tx_info_priv *tx_info_priv;
1397
1398 BUG_ON(list_empty(bf_head));
1399
1400 bf = list_first_entry(bf_head, struct ath_buf, list);
1401 bf->bf_state.bf_type |= BUF_AMPDU;
1402 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1403 bf->bf_tidno = txctl->tidno;
1404
1405 /*
1406 * Do not queue to h/w when any of the following conditions is true:
1407 * - there are pending frames in software queue
1408 * - the TID is currently paused for ADDBA/BAR request
1409 * - seqno is not within block-ack window
1410 * - h/w queue depth exceeds low water mark
1411 */
1412 if (!list_empty(&tid->buf_q) || tid->paused ||
1413 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1414 txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1415 /*
1416 * Add this frame to software queue for scheduling later
1417 * for aggregation.
1418 */
1419 list_splice_tail_init(bf_head, &tid->buf_q);
1420 ath_tx_queue_tid(txq, tid);
1421 return 0;
1422 }
1423
1424 skb = (struct sk_buff *)bf->bf_mpdu;
1425 tx_info = IEEE80211_SKB_CB(skb);
1426 /* XXX: HACK! */
1427 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
1428 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1429
1430 /* Add sub-frame to BAW */
1431 ath_tx_addto_baw(sc, tid, bf);
1432
1433 /* Queue to h/w without aggregation */
1434 bf->bf_nframes = 1;
1435 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1436 ath_buf_set_rate(sc, bf);
1437 ath_tx_txqaddbuf(sc, txq, bf_head);
1438 return 0;
1439 }
1440
1441 /*
1442  * Look up the rate series for this frame and return the
1443  * aggregation limit based on the lowest of the rates.
1444 */
1445
1446 static u32 ath_lookup_rate(struct ath_softc *sc,
1447 struct ath_buf *bf,
1448 struct ath_atx_tid *tid)
1449 {
1450 const struct ath9k_rate_table *rt = sc->sc_currates;
1451 struct sk_buff *skb;
1452 struct ieee80211_tx_info *tx_info;
1453 struct ath_tx_info_priv *tx_info_priv;
1454 u32 max_4ms_framelen, frame_length;
1455 u16 aggr_limit, legacy = 0, maxampdu;
1456 int i;
1457
1458
1459 skb = (struct sk_buff *)bf->bf_mpdu;
1460 tx_info = IEEE80211_SKB_CB(skb);
1461 tx_info_priv = (struct ath_tx_info_priv *)
1462 tx_info->control.vif; /* XXX: HACK! */
1463 memcpy(bf->bf_rcs,
1464 tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1465
1466 /*
1467 * Find the lowest frame length among the rate series that will have a
1468 * 4ms transmit duration.
1469 * TODO - TXOP limit needs to be considered.
1470 */
1471 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1472
1473 for (i = 0; i < 4; i++) {
1474 if (bf->bf_rcs[i].tries) {
1475 frame_length = bf->bf_rcs[i].max_4ms_framelen;
1476
1477 if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
1478 legacy = 1;
1479 break;
1480 }
1481
1482 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1483 }
1484 }
1485
1486 /*
1487 * limit aggregate size by the minimum rate if rate selected is
1488 * not a probe rate, if rate selected is a probe rate then
1489 * avoid aggregation of this packet.
1490 */
1491 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1492 return 0;
1493
1494 aggr_limit = min(max_4ms_framelen,
1495 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1496
1497 /*
1498          * h/w can accept aggregates up to 16-bit lengths (65535).
1499          * The IE, however, can hold up to 65536, which shows up here
1500 * as zero. Ignore 65536 since we are constrained by hw.
1501 */
1502 maxampdu = tid->an->maxampdu;
1503 if (maxampdu)
1504 aggr_limit = min(aggr_limit, maxampdu);
1505
1506 return aggr_limit;
1507 }
1508
1509 /*
1510  * Returns the number of delimiters to be added to
1511  * meet the minimum required MPDU density.
1512  * The caller should make sure that the rate is an HT rate.
1513 */
1514
1515 static int ath_compute_num_delims(struct ath_softc *sc,
1516 struct ath_atx_tid *tid,
1517 struct ath_buf *bf,
1518 u16 frmlen)
1519 {
1520 const struct ath9k_rate_table *rt = sc->sc_currates;
1521 u32 nsymbits, nsymbols, mpdudensity;
1522 u16 minlen;
1523 u8 rc, flags, rix;
1524 int width, half_gi, ndelim, mindelim;
1525
1526 /* Select standard number of delimiters based on frame length alone */
1527 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
1528
1529 /*
1530 * If encryption enabled, hardware requires some more padding between
1531 * subframes.
1532 * TODO - this could be improved to be dependent on the rate.
1533 * The hardware can keep up at lower rates, but not higher rates
1534 */
1535 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
1536 ndelim += ATH_AGGR_ENCRYPTDELIM;
1537
1538 /*
1539          * Convert desired mpdu density from microseconds to bytes based
1540          * on highest rate in rate series (i.e. first rate) to determine
1541          * required minimum length for subframe. Take into account
1542          * whether the high rate is 20 or 40 MHz and half or full GI.
1543 */
1544 mpdudensity = tid->an->mpdudensity;
1545
1546 /*
1547 * If there is no mpdu density restriction, no further calculation
1548 * is needed.
1549 */
1550 if (mpdudensity == 0)
1551 return ndelim;
1552
1553 rix = bf->bf_rcs[0].rix;
1554 flags = bf->bf_rcs[0].flags;
1555 rc = rt->info[rix].rateCode;
1556 width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
1557 half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
1558
1559 if (half_gi)
1560 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
1561 else
1562 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
1563
1564 if (nsymbols == 0)
1565 nsymbols = 1;
1566
1567 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1568 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
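/*
 * Example: with mpdudensity = 8 (us) at MCS 7, 20 MHz, long GI this gives
 * nsymbols = 8 / 4 = 2 and nsymbits = 260, i.e. minlen = 2 * 260 / 8 = 65
 * bytes; any subframe shorter than that gets extra delimiters below to
 * pad out the required gap.
 */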
1569
1570 /* Is frame shorter than required minimum length? */
1571 if (frmlen < minlen) {
1572 /* Get the minimum number of delimiters required. */
1573 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1574 ndelim = max(mindelim, ndelim);
1575 }
1576
1577 return ndelim;
1578 }
1579
1580 /*
1581 * For aggregation from software buffer queue.
1582 * NB: must be called with txq lock held
1583 */
1584
1585 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1586 struct ath_atx_tid *tid,
1587 struct list_head *bf_q,
1588 struct ath_buf **bf_last,
1589 struct aggr_rifs_param *param,
1590 int *prev_frames)
1591 {
1592 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
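/* A-MPDU subframes must start on 4-byte boundaries; PADBYTES() is the 0-3
 * bytes of padding needed to round _len up to the next multiple of 4. */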
1593 struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
1594 struct list_head bf_head;
1595 int rl = 0, nframes = 0, ndelim;
1596 u16 aggr_limit = 0, al = 0, bpad = 0,
1597 al_delta, h_baw = tid->baw_size / 2;
1598 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1599 int prev_al = 0, is_ds_rate = 0;
1600 INIT_LIST_HEAD(&bf_head);
1601
1602 BUG_ON(list_empty(&tid->buf_q));
1603
1604 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
1605
1606 do {
1607 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1608
1609 /*
1610 * do not step over block-ack window
1611 */
1612 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
1613 status = ATH_AGGR_BAW_CLOSED;
1614 break;
1615 }
1616
1617 if (!rl) {
1618 aggr_limit = ath_lookup_rate(sc, bf, tid);
1619 rl = 1;
1620 /*
1621 * Is rate dual stream
1622 */
1623 is_ds_rate =
1624 (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
1625 }
1626
1627 /*
1628 * do not exceed aggregation limit
1629 */
1630 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
1631
1632 if (nframes && (aggr_limit <
1633 (al + bpad + al_delta + prev_al))) {
1634 status = ATH_AGGR_LIMITED;
1635 break;
1636 }
1637
1638 /*
1639 * do not exceed subframe limit
1640 */
1641 if ((nframes + *prev_frames) >=
1642 min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1643 status = ATH_AGGR_LIMITED;
1644 break;
1645 }
1646
1647 /*
1648 * add padding for previous frame to aggregation length
1649 */
1650 al += bpad + al_delta;
1651
1652 /*
1653 * Get the delimiters needed to meet the MPDU
1654 * density for this node.
1655 */
1656 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
1657
1658 bpad = PADBYTES(al_delta) + (ndelim << 2);
1659
1660 bf->bf_next = NULL;
1661 bf->bf_lastfrm->bf_desc->ds_link = 0;
1662
1663 /*
1664 * this packet is part of an aggregate
1665 * - remove all descriptors belonging to this frame from
1666 * software queue
1667 * - add it to block ack window
1668 * - set up descriptors for aggregation
1669 */
1670 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1671 ath_tx_addto_baw(sc, tid, bf);
1672
1673 list_for_each_entry(tbf, &bf_head, list) {
1674 ath9k_hw_set11n_aggr_middle(sc->sc_ah,
1675 tbf->bf_desc, ndelim);
1676 }
1677
1678 /*
1679 * link buffers of this frame to the aggregate
1680 */
1681 list_splice_tail_init(&bf_head, bf_q);
1682 nframes++;
1683
1684 if (bf_prev) {
1685 bf_prev->bf_next = bf;
1686 bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
1687 }
1688 bf_prev = bf;
1689
1690 #ifdef AGGR_NOSHORT
1691 /*
1692 * terminate aggregation on a small packet boundary
1693 */
1694 if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
1695 status = ATH_AGGR_SHORTPKT;
1696 break;
1697 }
1698 #endif
1699 } while (!list_empty(&tid->buf_q));
1700
1701 bf_first->bf_al = al;
1702 bf_first->bf_nframes = nframes;
1703 *bf_last = bf_prev;
1704 return status;
1705 #undef PADBYTES
1706 }
1707
1708 /*
1709 * process pending frames possibly doing a-mpdu aggregation
1710 * NB: must be called with txq lock held
1711 */
1712
1713 static void ath_tx_sched_aggr(struct ath_softc *sc,
1714 struct ath_txq *txq, struct ath_atx_tid *tid)
1715 {
1716 struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
1717 enum ATH_AGGR_STATUS status;
1718 struct list_head bf_q;
1719 struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
1720 int prev_frames = 0;
1721
1722 do {
1723 if (list_empty(&tid->buf_q))
1724 return;
1725
1726 INIT_LIST_HEAD(&bf_q);
1727
1728 status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
1729 &prev_frames);
1730
1731 /*
1732 * no frames picked up to be aggregated; block-ack
1733 * window is not open
1734 */
1735 if (list_empty(&bf_q))
1736 break;
1737
1738 bf = list_first_entry(&bf_q, struct ath_buf, list);
1739 bf_last = list_entry(bf_q.prev, struct ath_buf, list);
1740 bf->bf_lastbf = bf_last;
1741
1742 /*
1743 * if only one frame, send as non-aggregate
1744 */
1745 if (bf->bf_nframes == 1) {
1746 ASSERT(bf->bf_lastfrm == bf_last);
1747
1748 bf->bf_state.bf_type &= ~BUF_AGGR;
1749 /*
1750 * clear aggr bits for every descriptor
1751 * XXX TODO: is there a way to optimize it?
1752 */
1753 list_for_each_entry(tbf, &bf_q, list) {
1754 ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
1755 }
1756
1757 ath_buf_set_rate(sc, bf);
1758 ath_tx_txqaddbuf(sc, txq, &bf_q);
1759 continue;
1760 }
1761
1762 /*
1763 * setup first desc with rate and aggr info
1764 */
1765 bf->bf_state.bf_type |= BUF_AGGR;
1766 ath_buf_set_rate(sc, bf);
1767 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1768
1769 /*
1770 * anchor last frame of aggregate correctly
1771 */
1772 ASSERT(bf_lastaggr);
1773 ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
1774 tbf = bf_lastaggr;
1775 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1776
1777 /* XXX: We don't enter into this loop, consider removing this */
1778 while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
1779 tbf = list_entry(tbf->list.next, struct ath_buf, list);
1780 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1781 }
1782
1783 txq->axq_aggr_depth++;
1784
1785 /*
1786 * Normal aggregate, queue to hardware
1787 */
1788 ath_tx_txqaddbuf(sc, txq, &bf_q);
1789
1790 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1791 status != ATH_AGGR_BAW_CLOSED);
1792 }
1793
1794 /* Called with txq lock held */
1795
1796 static void ath_tid_drain(struct ath_softc *sc,
1797 struct ath_txq *txq,
1798 struct ath_atx_tid *tid,
1799 bool bh_flag)
1800 {
1801 struct ath_buf *bf;
1802 struct list_head bf_head;
1803 INIT_LIST_HEAD(&bf_head);
1804
1805 for (;;) {
1806 if (list_empty(&tid->buf_q))
1807 break;
1808 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1809
1810 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1811
1812 /* update baw for software retried frame */
1813 if (bf_isretried(bf))
1814 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1815
1816 /*
1817 * do not indicate packets while holding txq spinlock.
1818 * unlock is intentional here
1819 */
1820 if (likely(bh_flag))
1821 spin_unlock_bh(&txq->axq_lock);
1822 else
1823 spin_unlock(&txq->axq_lock);
1824
1825 /* complete this sub-frame */
1826 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
1827
1828 if (likely(bh_flag))
1829 spin_lock_bh(&txq->axq_lock);
1830 else
1831 spin_lock(&txq->axq_lock);
1832 }
1833
1834 /*
1835 * TODO: For frame(s) that are in the retry state, we will reuse the
1836 * sequence number(s) without setting the retry bit. The
1837 * alternative is to give up on these and BAR the receiver's window
1838 * forward.
1839 */
1840 tid->seq_next = tid->seq_start;
1841 tid->baw_tail = tid->baw_head;
1842 }
1843
1844 /*
1845 * Drain all pending buffers
1846 * NB: must be called with txq lock held
1847 */
1848
1849 static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1850 struct ath_txq *txq,
1851 bool bh_flag)
1852 {
1853 struct ath_atx_ac *ac, *ac_tmp;
1854 struct ath_atx_tid *tid, *tid_tmp;
1855
1856 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1857 list_del(&ac->list);
1858 ac->sched = false;
1859 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1860 list_del(&tid->list);
1861 tid->sched = false;
1862 ath_tid_drain(sc, txq, tid, bh_flag);
1863 }
1864 }
1865 }
1866
1867 static int ath_tx_start_dma(struct ath_softc *sc,
1868 struct sk_buff *skb,
1869 struct scatterlist *sg,
1870 u32 n_sg,
1871 struct ath_tx_control *txctl)
1872 {
1873 struct ath_node *an = txctl->an;
1874 struct ath_buf *bf = NULL;
1875 struct list_head bf_head;
1876 struct ath_desc *ds;
1877 struct ath_hal *ah = sc->sc_ah;
1878 struct ath_txq *txq;
1879 struct ath_tx_info_priv *tx_info_priv;
1880 struct ath_rc_series *rcs;
1881 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1882 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1883 __le16 fc = hdr->frame_control;
1884
1885 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
1886 txq = sc->sc_cabq;
1887 else
1888 txq = &sc->sc_txq[txctl->qnum];
1889
1890 /* For each sglist entry, allocate an ath_buf for DMA */
1891 INIT_LIST_HEAD(&bf_head);
1892 spin_lock_bh(&sc->sc_txbuflock);
1893 if (unlikely(list_empty(&sc->sc_txbuf))) {
1894 spin_unlock_bh(&sc->sc_txbuflock);
1895 return -ENOMEM;
1896 }
1897
1898 bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
1899 list_del(&bf->list);
1900 spin_unlock_bh(&sc->sc_txbuflock);
1901
1902 list_add_tail(&bf->list, &bf_head);
1903
1904 /* set up this buffer */
1905 ATH_TXBUF_RESET(bf);
1906 bf->bf_frmlen = txctl->frmlen;
1907
1908 ieee80211_is_data(fc) ?
1909 (bf->bf_state.bf_type |= BUF_DATA) :
1910 (bf->bf_state.bf_type &= ~BUF_DATA);
1911 ieee80211_is_back_req(fc) ?
1912 (bf->bf_state.bf_type |= BUF_BAR) :
1913 (bf->bf_state.bf_type &= ~BUF_BAR);
1914 ieee80211_is_pspoll(fc) ?
1915 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1916 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1917 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
1918 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1919 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1920
1921 bf->bf_flags = txctl->flags;
1922 bf->bf_keytype = txctl->keytype;
1923 /* XXX: HACK! */
1924 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
1925 rcs = tx_info_priv->rcs;
1926 bf->bf_rcs[0] = rcs[0];
1927 bf->bf_rcs[1] = rcs[1];
1928 bf->bf_rcs[2] = rcs[2];
1929 bf->bf_rcs[3] = rcs[3];
1930 bf->bf_node = an;
1931 bf->bf_mpdu = skb;
1932 bf->bf_buf_addr = sg_dma_address(sg);
1933
1934 /* setup descriptor */
1935 ds = bf->bf_desc;
1936 ds->ds_link = 0;
1937 ds->ds_data = bf->bf_buf_addr;
1938
1939 /*
1940 * Save the DMA context in the first ath_buf
1941 */
1942 bf->bf_dmacontext = txctl->dmacontext;
1943
1944 /*
1945 * Formulate first tx descriptor with tx controls.
1946 */
1947 ath9k_hw_set11n_txdesc(ah,
1948 ds,
1949 bf->bf_frmlen, /* frame length */
1950 txctl->atype, /* Atheros packet type */
1951 min(txctl->txpower, (u16)60), /* txpower */
1952 txctl->keyix, /* key cache index */
1953 txctl->keytype, /* key type */
1954 txctl->flags); /* flags */
1955 ath9k_hw_filltxdesc(ah,
1956 ds,
1957 sg_dma_len(sg), /* segment length */
1958 true, /* first segment */
1959 (n_sg == 1) ? true : false, /* last segment */
1960 ds); /* first descriptor */
1961
1962 bf->bf_lastfrm = bf;
1963 (txctl->ht) ?
1964 (bf->bf_state.bf_type |= BUF_HT) :
1965 (bf->bf_state.bf_type &= ~BUF_HT);
1966
1967 spin_lock_bh(&txq->axq_lock);
1968
1969 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
1970 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
1971 if (ath_aggr_query(sc, an, txctl->tidno)) {
1972 /*
1973 * Try aggregation if it's a unicast data frame
1974 * and the destination is HT capable.
1975 */
1976 ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
1977 } else {
1978 /*
1979 * Send this frame as regular when ADDBA exchange
1980 * is neither complete nor pending.
1981 */
1982 ath_tx_send_normal(sc, txq, tid, &bf_head);
1983 }
1984 } else {
1985 bf->bf_lastbf = bf;
1986 bf->bf_nframes = 1;
1987 ath_buf_set_rate(sc, bf);
1988
1989 if (ieee80211_is_back_req(fc)) {
1990 /* This is required for resuming tid
1991 * during BAR completion */
1992 bf->bf_tidno = txctl->tidno;
1993 }
1994
1995 ath_tx_txqaddbuf(sc, txq, &bf_head);
1996 }
1997 spin_unlock_bh(&txq->axq_lock);
1998 return 0;
1999 }
2000
2001 static void xmit_map_sg(struct ath_softc *sc,
2002 struct sk_buff *skb,
2003 struct ath_tx_control *txctl)
2004 {
2005 struct ath_xmit_status tx_status;
2006 struct ath_atx_tid *tid;
2007 struct scatterlist sg;
2008
2009 txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
2010 skb->len, PCI_DMA_TODEVICE);
2011
2012 /* setup S/G list */
2013 memset(&sg, 0, sizeof(struct scatterlist));
2014 sg_dma_address(&sg) = txctl->dmacontext;
2015 sg_dma_len(&sg) = skb->len;
2016
2017 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2018 /*
2019 		 * We have to drop the frame here.
2020 */
2021 pci_unmap_single(sc->pdev, txctl->dmacontext,
2022 skb->len, PCI_DMA_TODEVICE);
2023
2024 tx_status.retries = 0;
2025 tx_status.flags = ATH_TX_ERROR;
2026
2027 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2028 /* Reclaim the seqno. */
2029 tid = ATH_AN_2_TID((struct ath_node *)
2030 txctl->an, txctl->tidno);
2031 DECR(tid->seq_next, IEEE80211_SEQ_MAX);
2032 }
2033 ath_tx_complete(sc, skb, &tx_status, txctl->an);
2034 }
2035 }
2036
2037 /* Initialize TX queue and h/w */
2038
2039 int ath_tx_init(struct ath_softc *sc, int nbufs)
2040 {
2041 int error = 0;
2042
2043 do {
2044 spin_lock_init(&sc->sc_txbuflock);
2045
2046 /* Setup tx descriptors */
2047 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
2048 "tx", nbufs, 1);
2049 if (error != 0) {
2050 DPRINTF(sc, ATH_DBG_FATAL,
2051 "%s: failed to allocate tx descriptors: %d\n",
2052 __func__, error);
2053 break;
2054 }
2055
2056 /* XXX allocate beacon state together with vap */
2057 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
2058 "beacon", ATH_BCBUF, 1);
2059 if (error != 0) {
2060 DPRINTF(sc, ATH_DBG_FATAL,
2061 "%s: failed to allocate "
2062 				"beacon descriptors: %d\n",
2063 __func__, error);
2064 break;
2065 }
2066
2067 } while (0);
2068
2069 if (error != 0)
2070 ath_tx_cleanup(sc);
2071
2072 return error;
2073 }
2074
2075 /* Reclaim all tx queue resources */
2076
2077 int ath_tx_cleanup(struct ath_softc *sc)
2078 {
2079 /* cleanup beacon descriptors */
2080 if (sc->sc_bdma.dd_desc_len != 0)
2081 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
2082
2083 /* cleanup tx descriptors */
2084 if (sc->sc_txdma.dd_desc_len != 0)
2085 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
2086
2087 return 0;
2088 }
2089
2090 /* Setup a h/w transmit queue */
2091
2092 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2093 {
2094 struct ath_hal *ah = sc->sc_ah;
2095 struct ath9k_tx_queue_info qi;
2096 int qnum;
2097
2098 memset(&qi, 0, sizeof(qi));
2099 qi.tqi_subtype = subtype;
2100 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
2101 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
2102 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
2103 qi.tqi_physCompBuf = 0;
2104
2105 /*
2106 * Enable interrupts only for EOL and DESC conditions.
2107 * We mark tx descriptors to receive a DESC interrupt
2108 	 * when a tx queue gets deep; otherwise we wait for the
2109 	 * EOL interrupt to reap descriptors. Note that this is done
2110 	 * to reduce interrupt load; it only defers the reaping of
2111 	 * descriptors, never the transmission of frames. Aside from
2112 	 * reducing interrupts this also permits more concurrency.
2113 	 * The only potential downside is if the tx queue backs
2114 	 * up, in which case the top half of the kernel may back up
2115 * due to a lack of tx descriptors.
2116 *
2117 * The UAPSD queue is an exception, since we take a desc-
2118 * based intr on the EOSP frames.
2119 */
2120 if (qtype == ATH9K_TX_QUEUE_UAPSD)
2121 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
2122 else
2123 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
2124 TXQ_FLAG_TXDESCINT_ENABLE;
2125 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
2126 if (qnum == -1) {
2127 /*
2128 		 * NB: don't print a message; this happens
2129 * normally on parts with too few tx queues
2130 */
2131 return NULL;
2132 }
2133 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
2134 DPRINTF(sc, ATH_DBG_FATAL,
2135 "%s: hal qnum %u out of range, max %u!\n",
2136 __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
2137 ath9k_hw_releasetxqueue(ah, qnum);
2138 return NULL;
2139 }
2140 if (!ATH_TXQ_SETUP(sc, qnum)) {
2141 struct ath_txq *txq = &sc->sc_txq[qnum];
2142
2143 txq->axq_qnum = qnum;
2144 txq->axq_link = NULL;
2145 INIT_LIST_HEAD(&txq->axq_q);
2146 INIT_LIST_HEAD(&txq->axq_acq);
2147 spin_lock_init(&txq->axq_lock);
2148 txq->axq_depth = 0;
2149 txq->axq_aggr_depth = 0;
2150 txq->axq_totalqueued = 0;
2151 txq->axq_linkbuf = NULL;
2152 sc->sc_txqsetup |= 1<<qnum;
2153 }
2154 return &sc->sc_txq[qnum];
2155 }
2156
2157 /* Reclaim resources for a setup queue */
2158
2159 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
2160 {
2161 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
2162 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
2163 }
2164
2165 /*
2166  * Setup a hardware data transmit queue for the specified
2167  * access category. The HAL may not support all requested
2168  * queues, in which case it will return a reference to a
2169  * previously set up queue. We record the mapping from ACs
2170  * to h/w queues for use by ath_tx_start and also track
2171 * the set of h/w queues being used to optimize work in the
2172 * transmit interrupt handler and related routines.
2173 */
2174
2175 int ath_tx_setup(struct ath_softc *sc, int haltype)
2176 {
2177 struct ath_txq *txq;
2178
2179 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2180 DPRINTF(sc, ATH_DBG_FATAL,
2181 "%s: HAL AC %u out of range, max %zu!\n",
2182 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
2183 return 0;
2184 }
2185 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2186 if (txq != NULL) {
2187 sc->sc_haltype2q[haltype] = txq->axq_qnum;
2188 return 1;
2189 } else
2190 return 0;
2191 }
2192
2193 int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2194 {
2195 int qnum;
2196
2197 switch (qtype) {
2198 case ATH9K_TX_QUEUE_DATA:
2199 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2200 DPRINTF(sc, ATH_DBG_FATAL,
2201 "%s: HAL AC %u out of range, max %zu!\n",
2202 __func__,
2203 haltype, ARRAY_SIZE(sc->sc_haltype2q));
2204 return -1;
2205 }
2206 qnum = sc->sc_haltype2q[haltype];
2207 break;
2208 case ATH9K_TX_QUEUE_BEACON:
2209 qnum = sc->sc_bhalq;
2210 break;
2211 case ATH9K_TX_QUEUE_CAB:
2212 qnum = sc->sc_cabq->axq_qnum;
2213 break;
2214 default:
2215 qnum = -1;
2216 }
2217 return qnum;
2218 }
2219
2220 /* Update parameters for a transmit queue */
2221
2222 int ath_txq_update(struct ath_softc *sc, int qnum,
2223 struct ath9k_tx_queue_info *qinfo)
2224 {
2225 struct ath_hal *ah = sc->sc_ah;
2226 int error = 0;
2227 struct ath9k_tx_queue_info qi;
2228
2229 if (qnum == sc->sc_bhalq) {
2230 /*
2231 * XXX: for beacon queue, we just save the parameter.
2232 * It will be picked up by ath_beaconq_config when
2233 * it's necessary.
2234 */
2235 sc->sc_beacon_qi = *qinfo;
2236 return 0;
2237 }
2238
2239 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
2240
2241 ath9k_hw_get_txq_props(ah, qnum, &qi);
2242 qi.tqi_aifs = qinfo->tqi_aifs;
2243 qi.tqi_cwmin = qinfo->tqi_cwmin;
2244 qi.tqi_cwmax = qinfo->tqi_cwmax;
2245 qi.tqi_burstTime = qinfo->tqi_burstTime;
2246 qi.tqi_readyTime = qinfo->tqi_readyTime;
2247
2248 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
2249 DPRINTF(sc, ATH_DBG_FATAL,
2250 "%s: unable to update hardware queue %u!\n",
2251 __func__, qnum);
2252 error = -EIO;
2253 } else {
2254 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
2255 }
2256
2257 return error;
2258 }
2259
2260 int ath_cabq_update(struct ath_softc *sc)
2261 {
2262 struct ath9k_tx_queue_info qi;
2263 int qnum = sc->sc_cabq->axq_qnum;
2264 struct ath_beacon_config conf;
2265
2266 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
2267 /*
2268 * Ensure the readytime % is within the bounds.
2269 */
2270 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2271 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2272 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2273 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2274
2275 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2276 qi.tqi_readyTime =
2277 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
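	/*
	 * e.g. a cabqReadytime of 80 (%) gives the CAB queue 80% of the
	 * beacon interval as its ready time.
	 */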
2278 ath_txq_update(sc, qnum, &qi);
2279
2280 return 0;
2281 }
2282
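/* Transmit entry point: prepare tx control state for the frame and start
 * DMA mapping. Typically reached from the driver's mac80211 tx path. */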
2283 int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2284 {
2285 struct ath_tx_control txctl;
2286 int error = 0;
2287
2288 memset(&txctl, 0, sizeof(struct ath_tx_control));
2289 error = ath_tx_prepare(sc, skb, &txctl);
2290 if (error == 0)
2291 /*
2292 * Start DMA mapping.
2293 * ath_tx_start_dma() will be called either synchronously
2294 		 * or asynchronously once DMA is complete.
2295 */
2296 xmit_map_sg(sc, skb, &txctl);
2297 else
2298 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2299
2300 /* failed packets will be dropped by the caller */
2301 return error;
2302 }
2303
2304 /* Deferred processing of transmit interrupt */
2305
2306 void ath_tx_tasklet(struct ath_softc *sc)
2307 {
2308 int i;
2309 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2310
2311 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2312
2313 /*
2314 * Process each active queue.
2315 */
2316 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2317 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2318 ath_tx_processq(sc, &sc->sc_txq[i]);
2319 }
2320 }
2321
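/* Drain all buffers from a h/w tx queue. When retry_tx is false, frames
 * are marked ATH9K_TX_SW_ABORTED so that completion does not retry them. */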
2322 void ath_tx_draintxq(struct ath_softc *sc,
2323 struct ath_txq *txq, bool retry_tx)
2324 {
2325 struct ath_buf *bf, *lastbf;
2326 struct list_head bf_head;
2327
2328 INIT_LIST_HEAD(&bf_head);
2329
2330 /*
2331 * NB: this assumes output has been stopped and
2332 * we do not need to block ath_tx_tasklet
2333 */
2334 for (;;) {
2335 spin_lock_bh(&txq->axq_lock);
2336
2337 if (list_empty(&txq->axq_q)) {
2338 txq->axq_link = NULL;
2339 txq->axq_linkbuf = NULL;
2340 spin_unlock_bh(&txq->axq_lock);
2341 break;
2342 }
2343
2344 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2345
2346 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2347 list_del(&bf->list);
2348 spin_unlock_bh(&txq->axq_lock);
2349
2350 spin_lock_bh(&sc->sc_txbuflock);
2351 list_add_tail(&bf->list, &sc->sc_txbuf);
2352 spin_unlock_bh(&sc->sc_txbuflock);
2353 continue;
2354 }
2355
2356 lastbf = bf->bf_lastbf;
2357 if (!retry_tx)
2358 lastbf->bf_desc->ds_txstat.ts_flags =
2359 ATH9K_TX_SW_ABORTED;
2360
2361 /* remove ath_buf's of the same mpdu from txq */
2362 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
2363 txq->axq_depth--;
2364
2365 spin_unlock_bh(&txq->axq_lock);
2366
2367 if (bf_isampdu(bf))
2368 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2369 else
2370 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2371 }
2372
2373 /* flush any pending frames if aggregation is enabled */
2374 if (sc->sc_flags & SC_OP_TXAGGR) {
2375 if (!retry_tx) {
2376 spin_lock_bh(&txq->axq_lock);
2377 ath_txq_drain_pending_buffers(sc, txq,
2378 ATH9K_BH_STATUS_CHANGE);
2379 spin_unlock_bh(&txq->axq_lock);
2380 }
2381 }
2382 }
2383
2384 /* Drain the transmit queues and reclaim resources */
2385
2386 void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2387 {
2388 	/* Stop the beacon queue. The beacon will be freed when
2389 	 * we go to the INIT state */
2390 if (!(sc->sc_flags & SC_OP_INVALID)) {
2391 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2392 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2393 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2394 }
2395
2396 ath_drain_txdataq(sc, retry_tx);
2397 }
2398
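/* Current frame depth and aggregate depth of a h/w tx queue */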
2399 u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2400 {
2401 return sc->sc_txq[qnum].axq_depth;
2402 }
2403
2404 u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2405 {
2406 return sc->sc_txq[qnum].axq_aggr_depth;
2407 }
2408
2409 /* Check if an ADDBA is required. A valid node must be passed. */
2410 enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2411 struct ath_node *an,
2412 u8 tidno)
2413 {
2414 struct ath_atx_tid *txtid;
2415
2416 if (!(sc->sc_flags & SC_OP_TXAGGR))
2417 return AGGR_NOT_REQUIRED;
2418
2419 /* ADDBA exchange must be completed before sending aggregates */
2420 txtid = ATH_AN_2_TID(an, tidno);
2421
2422 if (txtid->addba_exchangecomplete)
2423 return AGGR_EXCHANGE_DONE;
2424
2425 if (txtid->cleanup_inprogress)
2426 return AGGR_CLEANUP_PROGRESS;
2427
2428 if (txtid->addba_exchangeinprogress)
2429 return AGGR_EXCHANGE_PROGRESS;
2430
2431 if (!txtid->addba_exchangecomplete) {
2432 if (!txtid->addba_exchangeinprogress &&
2433 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2434 txtid->addba_exchangeattempts++;
2435 return AGGR_REQUIRED;
2436 }
2437 }
2438
2439 return AGGR_NOT_REQUIRED;
2440 }
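
/*
 * Expected caller pattern (a sketch; the actual call site is outside this
 * file): when AGGR_REQUIRED is returned for an HT peer, the caller should
 * kick off an ADDBA exchange, e.g. via mac80211's
 * ieee80211_start_tx_ba_session(), and only aggregate once
 * addba_exchangecomplete is set (AGGR_EXCHANGE_DONE).
 */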
2441
2442 /* Start TX aggregation */
2443
2444 int ath_tx_aggr_start(struct ath_softc *sc,
2445 const u8 *addr,
2446 u16 tid,
2447 u16 *ssn)
2448 {
2449 struct ath_atx_tid *txtid;
2450 struct ath_node *an;
2451
2452 spin_lock_bh(&sc->node_lock);
2453 an = ath_node_find(sc, (u8 *) addr);
2454 spin_unlock_bh(&sc->node_lock);
2455
2456 if (!an) {
2457 DPRINTF(sc, ATH_DBG_AGGR,
2458 "%s: Node not found to initialize "
2459 "TX aggregation\n", __func__);
2460 return -1;
2461 }
2462
2463 if (sc->sc_flags & SC_OP_TXAGGR) {
2464 txtid = ATH_AN_2_TID(an, tid);
2465 txtid->addba_exchangeinprogress = 1;
2466 ath_tx_pause_tid(sc, txtid);
2467 }
2468
2469 return 0;
2470 }
2471
2472 /* Stop tx aggregation */
2473
2474 int ath_tx_aggr_stop(struct ath_softc *sc,
2475 const u8 *addr,
2476 u16 tid)
2477 {
2478 struct ath_node *an;
2479
2480 spin_lock_bh(&sc->node_lock);
2481 an = ath_node_find(sc, (u8 *) addr);
2482 spin_unlock_bh(&sc->node_lock);
2483
2484 if (!an) {
2485 DPRINTF(sc, ATH_DBG_AGGR,
2486 "%s: TX aggr stop for non-existent node\n", __func__);
2487 return -1;
2488 }
2489
2490 ath_tx_aggr_teardown(sc, an, tid);
2491 return 0;
2492 }
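
/*
 * Note: ath_tx_aggr_start() and ath_tx_aggr_stop() above are meant to be
 * driven from the driver's mac80211 ampdu_action handler when the stack
 * starts or tears down a TX BA session for a TID; the handler itself
 * lives outside this file.
 */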
2493
2494 /*
2495 * Performs transmit side cleanup when TID changes from aggregated to
2496 * unaggregated.
2497 * - Pause the TID and mark cleanup in progress
2498 * - Discard all retry frames from the s/w queue.
2499 */
2500
2501 void ath_tx_aggr_teardown(struct ath_softc *sc,
2502 struct ath_node *an, u8 tid)
2503 {
2504 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2505 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
2506 struct ath_buf *bf;
2507 struct list_head bf_head;
2508 INIT_LIST_HEAD(&bf_head);
2509
2510 DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
2511
2512 if (txtid->cleanup_inprogress) /* cleanup is in progress */
2513 return;
2514
2515 if (!txtid->addba_exchangecomplete) {
2516 txtid->addba_exchangeattempts = 0;
2517 return;
2518 }
2519
2520 /* TID must be paused first */
2521 ath_tx_pause_tid(sc, txtid);
2522
2523 /* drop all software retried frames and mark this TID */
2524 spin_lock_bh(&txq->axq_lock);
2525 while (!list_empty(&txtid->buf_q)) {
2526 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
2527 if (!bf_isretried(bf)) {
2528 /*
2529 			 * NB: this relies on the assumption that a
2530 			 * software-retried frame will always stay
2531 			 * at the head of the software queue.
2532 */
2533 break;
2534 }
2535 list_cut_position(&bf_head,
2536 &txtid->buf_q, &bf->bf_lastfrm->list);
2537 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
2538
2539 /* complete this sub-frame */
2540 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2541 }
2542
2543 if (txtid->baw_head != txtid->baw_tail) {
2544 spin_unlock_bh(&txq->axq_lock);
2545 txtid->cleanup_inprogress = true;
2546 } else {
2547 txtid->addba_exchangecomplete = 0;
2548 txtid->addba_exchangeattempts = 0;
2549 spin_unlock_bh(&txq->axq_lock);
2550 ath_tx_flush_tid(sc, txtid);
2551 }
2552 }
2553
2554 /*
2555 * Tx scheduling logic
2556 * NB: must be called with txq lock held
2557 */
2558
2559 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2560 {
2561 struct ath_atx_ac *ac;
2562 struct ath_atx_tid *tid;
2563
2564 /* nothing to schedule */
2565 if (list_empty(&txq->axq_acq))
2566 return;
2567 /*
2568 * get the first node/ac pair on the queue
2569 */
2570 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2571 list_del(&ac->list);
2572 ac->sched = false;
2573
2574 /*
2575 * process a single tid per destination
2576 */
2577 do {
2578 /* nothing to schedule */
2579 if (list_empty(&ac->tid_q))
2580 return;
2581
2582 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
2583 list_del(&tid->list);
2584 tid->sched = false;
2585
2586 if (tid->paused) /* check next tid to keep h/w busy */
2587 continue;
2588
2589 if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
2590 ((txq->axq_depth % 2) == 0)) {
2591 ath_tx_sched_aggr(sc, txq, tid);
2592 }
2593
2594 /*
2595 * add tid to round-robin queue if more frames
2596 * are pending for the tid
2597 */
2598 if (!list_empty(&tid->buf_q))
2599 ath_tx_queue_tid(txq, tid);
2600
2601 /* only schedule one TID at a time */
2602 break;
2603 } while (!list_empty(&ac->tid_q));
2604
2605 /*
2606 * schedule AC if more TIDs need processing
2607 */
2608 if (!list_empty(&ac->tid_q)) {
2609 /*
2610 * add dest ac to txq if not already added
2611 */
2612 if (!ac->sched) {
2613 ac->sched = true;
2614 list_add_tail(&ac->list, &txq->axq_acq);
2615 }
2616 }
2617 }
2618
2619 /* Initialize per-node transmit state */
2620
2621 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2622 {
2623 if (sc->sc_flags & SC_OP_TXAGGR) {
2624 struct ath_atx_tid *tid;
2625 struct ath_atx_ac *ac;
2626 int tidno, acno;
2627
2628 an->maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
2629
2630 /*
2631 * Init per tid tx state
2632 */
2633 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2634 tidno < WME_NUM_TID;
2635 tidno++, tid++) {
2636 tid->an = an;
2637 tid->tidno = tidno;
2638 tid->seq_start = tid->seq_next = 0;
2639 tid->baw_size = WME_MAX_BA;
2640 tid->baw_head = tid->baw_tail = 0;
2641 tid->sched = false;
2642 tid->paused = false;
2643 tid->cleanup_inprogress = false;
2644 INIT_LIST_HEAD(&tid->buf_q);
2645
2646 acno = TID_TO_WME_AC(tidno);
2647 tid->ac = &an->an_aggr.tx.ac[acno];
2648
2649 /* ADDBA state */
2650 tid->addba_exchangecomplete = 0;
2651 tid->addba_exchangeinprogress = 0;
2652 tid->addba_exchangeattempts = 0;
2653 }
2654
2655 /*
2656 * Init per ac tx state
2657 */
2658 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2659 acno < WME_NUM_AC; acno++, ac++) {
2660 ac->sched = false;
2661 INIT_LIST_HEAD(&ac->tid_q);
2662
2663 switch (acno) {
2664 case WME_AC_BE:
2665 ac->qnum = ath_tx_get_qnum(sc,
2666 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2667 break;
2668 case WME_AC_BK:
2669 ac->qnum = ath_tx_get_qnum(sc,
2670 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2671 break;
2672 case WME_AC_VI:
2673 ac->qnum = ath_tx_get_qnum(sc,
2674 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2675 break;
2676 case WME_AC_VO:
2677 ac->qnum = ath_tx_get_qnum(sc,
2678 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2679 break;
2680 }
2681 }
2682 }
2683 }
2684
2685 /* Cleanup the pending buffers for the node. */
2686
2687 void ath_tx_node_cleanup(struct ath_softc *sc,
2688 struct ath_node *an, bool bh_flag)
2689 {
2690 int i;
2691 struct ath_atx_ac *ac, *ac_tmp;
2692 struct ath_atx_tid *tid, *tid_tmp;
2693 struct ath_txq *txq;
2694 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2695 if (ATH_TXQ_SETUP(sc, i)) {
2696 txq = &sc->sc_txq[i];
2697
2698 if (likely(bh_flag))
2699 spin_lock_bh(&txq->axq_lock);
2700 else
2701 spin_lock(&txq->axq_lock);
2702
2703 list_for_each_entry_safe(ac,
2704 ac_tmp, &txq->axq_acq, list) {
2705 tid = list_first_entry(&ac->tid_q,
2706 struct ath_atx_tid, list);
2707 if (tid && tid->an != an)
2708 continue;
2709 list_del(&ac->list);
2710 ac->sched = false;
2711
2712 list_for_each_entry_safe(tid,
2713 tid_tmp, &ac->tid_q, list) {
2714 list_del(&tid->list);
2715 tid->sched = false;
2716 ath_tid_drain(sc, txq, tid, bh_flag);
2717 tid->addba_exchangecomplete = 0;
2718 tid->addba_exchangeattempts = 0;
2719 tid->cleanup_inprogress = false;
2720 }
2721 }
2722
2723 if (likely(bh_flag))
2724 spin_unlock_bh(&txq->axq_lock);
2725 else
2726 spin_unlock(&txq->axq_lock);
2727 }
2728 }
2729 }
2730
2731 /* Cleanup per node transmit state */
2732
2733 void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2734 {
2735 if (sc->sc_flags & SC_OP_TXAGGR) {
2736 struct ath_atx_tid *tid;
2737 int tidno, i;
2738
2739 		/* Verify per tid tx state has been cleaned up */
2740 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2741 tidno < WME_NUM_TID;
2742 tidno++, tid++) {
2743
2744 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
2745 ASSERT(tid->tx_buf[i] == NULL);
2746 }
2747 }
2748 }
2749
2750 void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2751 {
2752 int hdrlen, padsize;
2753 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2754 struct ath_tx_control txctl;
2755
2756 /*
2757 * As a temporary workaround, assign seq# here; this will likely need
2758 * to be cleaned up to work better with Beacon transmission and virtual
2759 * BSSes.
2760 */
2761 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2762 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2763 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2764 sc->seq_no += 0x10;
2765 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2766 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
2767 }
2768
2769 /* Add the padding after the header if this is not already done */
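	/* e.g. a 26-byte QoS data header gives padsize = 2, keeping the
	 * frame body 4-byte aligned after the header */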
2770 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2771 if (hdrlen & 3) {
2772 padsize = hdrlen % 4;
2773 if (skb_headroom(skb) < padsize) {
2774 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
2775 "failed\n", __func__);
2776 dev_kfree_skb_any(skb);
2777 return;
2778 }
2779 skb_push(skb, padsize);
2780 memmove(skb->data, skb->data + padsize, hdrlen);
2781 }
2782
2783 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
2784 __func__,
2785 skb);
2786
2787 memset(&txctl, 0, sizeof(struct ath_tx_control));
2788 txctl.flags = ATH9K_TXDESC_CAB;
2789 if (ath_tx_prepare(sc, skb, &txctl) == 0) {
2790 /*
2791 * Start DMA mapping.
2792 * ath_tx_start_dma() will be called either synchronously
2793 		 * or asynchronously once DMA is complete.
2794 */
2795 xmit_map_sg(sc, skb, &txctl);
2796 } else {
2797 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2798 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
2799 dev_kfree_skb_any(skb);
2800 }
2801 }
2802