ath5k: clean up base.h and its use
deliverable/linux.git: drivers/net/wireless/ath/ath5k/qcu.c
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/******************\
* Helper functions *
\******************/

/*
 * Get the number of pending frames
 * for a specific queue [5211+].
 *
 * Returns the number of frames pending on the queue, 1 if the queue
 * has no pending frames but is still marked as transmitting (TXE set),
 * and 0 for inactive queues or on AR5210.
 */
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
        u32 pending;
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return false;

        /* XXX: How about AR5K_CFG_TXCNT ? */
        if (ah->ah_version == AR5K_AR5210)
                return false;

        pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
        pending &= AR5K_QCU_STS_FRMPENDCNT;

        /* It's possible to have no frames pending even if TXE
         * is set. To indicate that the queue has not stopped,
         * return true */
        if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                return true;

        return pending;
}
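
/*
 * Usage sketch (editor's note, hypothetical caller, not taken from base.c):
 * a driver can poll this to wait for a queue to drain before touching it,
 * e.g.:
 *
 *        for (i = 0; i < 100 && ath5k_hw_num_tx_pending(ah, q); i++)
 *                udelay(100);
 */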

/*
 * Set a transmit queue inactive
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
        if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
                return;

        /* This queue will be skipped in further operations */
        ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
        /* For SIMR setup */
        AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/*
 * Make sure cw is a power of 2 minus 1 and smaller than 1024
 */
static u16 ath5k_cw_validate(u16 cw_req)
{
        u32 cw = 1;
        cw_req = min(cw_req, (u16)1023);

        while (cw < cw_req)
                cw = (cw << 1) | 1;

        return cw;
}
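
/*
 * Example (editor's note, follows directly from the loop above): a requested
 * cw of 100 is rounded up to 127 (0x7f), 1023 stays 1023, and anything above
 * 1023 is first clamped to 1023. The result is always of the form 2^n - 1.
 */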

/*
 * Get properties for a transmit queue
 */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
                struct ath5k_txq_info *queue_info)
{
        memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
        return 0;
}

/*
 * Set properties for a transmit queue
 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
                const struct ath5k_txq_info *qinfo)
{
        struct ath5k_txq_info *qi;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        qi = &ah->ah_txq[queue];

        if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EIO;

        /* copy and validate values */
        qi->tqi_type = qinfo->tqi_type;
        qi->tqi_subtype = qinfo->tqi_subtype;
        qi->tqi_flags = qinfo->tqi_flags;
        /*
         * According to the docs: Although the AIFS field is 8 bit wide,
         * the maximum supported value is 0xFC. Setting it higher than that
         * will cause the DCU to hang.
         */
        qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
        qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
        qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
        qi->tqi_cbr_period = qinfo->tqi_cbr_period;
        qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
        qi->tqi_burst_time = qinfo->tqi_burst_time;
        qi->tqi_ready_time = qinfo->tqi_ready_time;

        /* XXX: Is this supported on 5210? */
        /* XXX: Is this correct for AR5K_WME_AC_VI, VO ??? */
        if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
                ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
                 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
                        qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
                qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

        return 0;
}

/*
 * Initialize a transmit queue
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
                struct ath5k_txq_info *queue_info)
{
        unsigned int queue;
        int ret;

        /*
         * Get queue by type
         */
        /* 5210 only has 2 queues */
        if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
                        queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
                        for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
                                ah->ah_txq[queue].tqi_type !=
                                AR5K_TX_QUEUE_INACTIVE; queue++) {

                                if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
                                        return -EINVAL;
                        }
                        break;
                case AR5K_TX_QUEUE_UAPSD:
                        queue = AR5K_TX_QUEUE_ID_UAPSD;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                        queue = AR5K_TX_QUEUE_ID_BEACON;
                        break;
                case AR5K_TX_QUEUE_CAB:
                        queue = AR5K_TX_QUEUE_ID_CAB;
                        break;
                default:
                        return -EINVAL;
                }
        }

        /*
         * Setup internal queue structure
         */
        memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
        ah->ah_txq[queue].tqi_type = queue_type;

        if (queue_info != NULL) {
                queue_info->tqi_type = queue_type;
                ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
                if (ret)
                        return ret;
        }

        /*
         * We use ah_txq_status to hold a temporary value for
         * the secondary interrupt mask registers on 5211+,
         * see ath5k_hw_reset_tx_queue.
         */
        AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

        return queue;
}
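
/*
 * Usage sketch (editor's note, hypothetical and not copied from base.c):
 * a caller typically fills a struct ath5k_txq_info with the desired
 * AIFS/CW values and keeps the returned hw queue number around for later
 * ath5k_hw_reset_tx_queue / ath5k_hw_set_tx_queueprops calls, e.g.:
 *
 *        struct ath5k_txq_info qi = {
 *                .tqi_aifs = 2,
 *                .tqi_cw_min = 15,
 *                .tqi_cw_max = 1023,
 *                .tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
 *                             AR5K_TXQ_FLAG_TXDESCINT_ENABLE,
 *        };
 *        int qnum = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
 *        if (qnum < 0)
 *                return qnum;
 */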


/*******************************\
* Single QCU/DCU initialization *
\*******************************/

/*
 * Set tx retry limits on DCU
 */
void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
                                  unsigned int queue)
{
        /* Single data queue on AR5210 */
        if (ah->ah_version == AR5K_AR5210) {
                struct ath5k_txq_info *tq = &ah->ah_txq[queue];

                if (queue > 0)
                        return;

                ath5k_hw_reg_write(ah,
                        (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
                        | AR5K_REG_SM(ah->ah_retry_long,
                                      AR5K_NODCU_RETRY_LMT_SLG_RETRY)
                        | AR5K_REG_SM(ah->ah_retry_short,
                                      AR5K_NODCU_RETRY_LMT_SSH_RETRY)
                        | AR5K_REG_SM(ah->ah_retry_long,
                                      AR5K_NODCU_RETRY_LMT_LG_RETRY)
                        | AR5K_REG_SM(ah->ah_retry_short,
                                      AR5K_NODCU_RETRY_LMT_SH_RETRY),
                        AR5K_NODCU_RETRY_LMT);
        /* DCU on AR5211+ */
        } else {
                ath5k_hw_reg_write(ah,
                        AR5K_REG_SM(ah->ah_retry_long,
                                    AR5K_DCU_RETRY_LMT_RTS)
                        | AR5K_REG_SM(ah->ah_retry_long,
                                      AR5K_DCU_RETRY_LMT_STA_RTS)
                        | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
                                      AR5K_DCU_RETRY_LMT_STA_DATA),
                        AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
        }
}

/**
 * ath5k_hw_reset_tx_queue - Initialize a single hw queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Set DFS properties for the given transmit queue on DCU
 * and configure all queue-specific parameters.
 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
        struct ath5k_txq_info *tq = &ah->ah_txq[queue];

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        tq = &ah->ah_txq[queue];

        /* Skip if the queue is inactive or if we are on an AR5210,
         * which doesn't have a QCU/DCU */
        if ((ah->ah_version == AR5K_AR5210) ||
            (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
                return 0;

        /*
         * Set contention window (cw_min/cw_max)
         * and arbitrated interframe space (aifs)...
         */
        ath5k_hw_reg_write(ah,
                AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
                AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
                AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
                AR5K_QUEUE_DFS_LOCAL_IFS(queue));

        /*
         * Set tx retry limits for this queue
         */
        ath5k_hw_set_tx_retry_limits(ah, queue);


        /*
         * Set misc registers
         */

        /* Enable DCU to wait for next fragment from QCU */
        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                        AR5K_DCU_MISC_FRAG_WAIT);

        /* On Maui and Spirit use the global seqnum on DCU */
        if (ah->ah_mac_version < AR5K_SREV_AR5211)
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                AR5K_DCU_MISC_SEQNUM_CTL);

        /* Constant bit rate period */
        if (tq->tqi_cbr_period) {
                ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
                                        AR5K_QCU_CBRCFG_INTVAL) |
                                   AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
                                        AR5K_QCU_CBRCFG_ORN_THRES),
                                   AR5K_QUEUE_CBRCFG(queue));

                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_CBR);

                if (tq->tqi_cbr_overflow_limit)
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_CBR_THRES_ENABLE);
        }

        /* Ready time interval */
        if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
                ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
                                        AR5K_QCU_RDYTIMECFG_INTVAL) |
                                   AR5K_QCU_RDYTIMECFG_ENABLE,
                                   AR5K_QUEUE_RDYTIMECFG(queue));

        if (tq->tqi_burst_time) {
                ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
                                        AR5K_DCU_CHAN_TIME_DUR) |
                                   AR5K_DCU_CHAN_TIME_ENABLE,
                                   AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

                if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_RDY_VEOL_POLICY);
        }

        /* Enable/disable Post frame backoff */
        if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
                ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
                                   AR5K_QUEUE_DFS_MISC(queue));

        /* Enable/disable fragmentation burst backoff */
        if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
                ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
                                   AR5K_QUEUE_DFS_MISC(queue));

        /*
         * Set registers by queue type
         */
        switch (tq->tqi_type) {
        case AR5K_TX_QUEUE_BEACON:
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_DBA_GT |
                                AR5K_QCU_MISC_CBREXP_BCN_DIS |
                                AR5K_QCU_MISC_BCN_ENABLE);

                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
                                 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
                                AR5K_DCU_MISC_ARBLOCK_IGNORE |
                                AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
                                AR5K_DCU_MISC_BCN_ENABLE);
                break;

        case AR5K_TX_QUEUE_CAB:
                /* XXX: use BCN_SENT_GT, if we can figure out how */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_DBA_GT |
                                AR5K_QCU_MISC_CBREXP_DIS |
                                AR5K_QCU_MISC_CBREXP_BCN_DIS);
                ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
                        (AR5K_TUNE_SW_BEACON_RESP -
                         AR5K_TUNE_DMA_BEACON_RESP) -
                        AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
                        AR5K_QCU_RDYTIMECFG_ENABLE,
                        AR5K_QUEUE_RDYTIMECFG(queue));

                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
                                 AR5K_DCU_MISC_ARBLOCK_CTL_S));
                break;

        case AR5K_TX_QUEUE_UAPSD:
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_CBREXP_DIS);
                break;

        case AR5K_TX_QUEUE_DATA:
        default:
                break;
        }

        /* TODO: Handle frame compression */

        /*
         * Enable interrupts for this tx queue
         * in the secondary interrupt mask registers
         */
        if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

        if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
                AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

        /* Update secondary interrupt mask registers */

        /* Filter out inactive queues */
        ah->ah_txq_imr_txok &= ah->ah_txq_status;
        ah->ah_txq_imr_txerr &= ah->ah_txq_status;
        ah->ah_txq_imr_txurn &= ah->ah_txq_status;
        ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
        ah->ah_txq_imr_txeol &= ah->ah_txq_status;
        ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
        ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
        ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
        ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

        ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
                AR5K_SIMR0_QCU_TXOK) |
                AR5K_REG_SM(ah->ah_txq_imr_txdesc,
                AR5K_SIMR0_QCU_TXDESC),
                AR5K_SIMR0);

        ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
                AR5K_SIMR1_QCU_TXERR) |
                AR5K_REG_SM(ah->ah_txq_imr_txeol,
                AR5K_SIMR1_QCU_TXEOL),
                AR5K_SIMR1);

        /* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
        AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
        AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
                AR5K_REG_SM(ah->ah_txq_imr_txurn,
                AR5K_SIMR2_QCU_TXURN));

        ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
                AR5K_SIMR3_QCBRORN) |
                AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
                AR5K_SIMR3_QCBRURN),
                AR5K_SIMR3);

        ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
                AR5K_SIMR4_QTRIG), AR5K_SIMR4);

        /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
        ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
                AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);

        /* If no queue has TXNOFRM enabled, disable the interrupt
         * by setting AR5K_TXNOFRM to zero */
        if (ah->ah_txq_imr_nofrm == 0)
                ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

        /* Set QCU mask for this DCU to save power */
        AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);

        return 0;
}


/**************************\
* Global QCU/DCU functions *
\**************************/

/**
 * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
 *
 * @ah: The &struct ath5k_hw
 * @slot_time: Slot time in us
 *
 * Sets the global IFS intervals on DCU (also works on AR5210) for
 * the given slot time and the current bwmode.
 */
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
        struct ieee80211_channel *channel = ah->ah_current_channel;
        struct ieee80211_rate *rate;
        u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
        u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);

        if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
                return -EINVAL;

        sifs = ath5k_hw_get_default_sifs(ah);
        sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);

        /* EIFS
         * Txtime of ack at lowest rate + SIFS + DIFS
         * (DIFS = SIFS + 2 * Slot time)
         *
         * Note: HAL has some predefined values for EIFS
         * Turbo:   (37 + 2 * 6)
         * Default: (74 + 2 * 9)
         * Half:    (149 + 2 * 13)
         * Quarter: (298 + 2 * 21)
         *
         * (74 + 2 * 6) for AR5210 default and turbo!
         *
         * According to the formula we have
         * ack_tx_time = 25 for turbo and
         * ack_tx_time = 42.5 * clock multiplier
         * for default/half/quarter.
         *
         * This can't be right, 42 is what we would get
         * from ath5k_hw_get_frame_dur_for_bwmode or
         * ieee80211_generic_frame_duration for zero frame
         * length and without SIFS!
         *
         * Also we have a different lowest rate for 802.11a
         */
        if (channel->band == IEEE80211_BAND_5GHZ)
                rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
        else
                rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];

        ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);

        /* ack_tx_time includes an SIFS already */
        eifs = ack_tx_time + sifs + 2 * slot_time;
        eifs_clock = ath5k_hw_htoclock(ah, eifs);
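
        /* Worked example (editor's note, derived from the HAL numbers in
         * the comment above and assuming the standard 16 usec 11a SIFS):
         * with ack_tx_time ~ 42.5 + 16 (the included SIFS) = ~58.5 usec,
         * sifs = 16 usec and slot_time = 9 usec, this yields
         * eifs = 58.5 + 16 + 2 * 9 ~ 92.5 usec, matching the HAL's default
         * of 74 + 2 * 9 before conversion via ath5k_hw_htoclock(). */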

        /* Set IFS settings on AR5210 */
        if (ah->ah_version == AR5K_AR5210) {
                u32 pifs, pifs_clock, difs, difs_clock;

                /* Set slot time */
                ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);

                /* Set EIFS */
                eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);

                /* PIFS = Slot time + SIFS */
                pifs = slot_time + sifs;
                pifs_clock = ath5k_hw_htoclock(ah, pifs);
                pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);

                /* DIFS = SIFS + 2 * Slot time */
                difs = sifs + 2 * slot_time;
                difs_clock = ath5k_hw_htoclock(ah, difs);

                /* Set SIFS/DIFS */
                ath5k_hw_reg_write(ah, (difs_clock <<
                                AR5K_IFS0_DIFS_S) | sifs_clock,
                                AR5K_IFS0);

                /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
                ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
                                (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
                                AR5K_IFS1);

                return 0;
        }

        /* Set IFS slot time */
        ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);

        /* Set EIFS interval */
        ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);

        /* Set SIFS interval in usecs */
        AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
                                AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
                                sifs);

        /* Set SIFS interval in clock cycles */
        ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);

        return 0;
}


int ath5k_hw_init_queues(struct ath5k_hw *ah)
{
        int i, ret;

        /* TODO: HW Compression support for data queues */
        /* TODO: Burst prefetch for data queues */

        /*
         * Reset queues and start beacon timers at the end of the reset routine
         * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
         * Note: If we want we can assign multiple qcus on one dcu.
         */
        if (ah->ah_version != AR5K_AR5210)
                for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
                        ret = ath5k_hw_reset_tx_queue(ah, i);
                        if (ret) {
                                ATH5K_ERR(ah,
                                        "failed to reset TX queue #%d\n", i);
                                return ret;
                        }
                }
        else
                /* No QCU/DCU on AR5210, just set tx
                 * retry limits. We set IFS parameters
                 * in ath5k_hw_set_ifs_intervals */
                ath5k_hw_set_tx_retry_limits(ah, 0);

        /* Set the turbo flag when operating on 40 MHz */
        if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
                AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
                                AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);

        /* If we didn't set IFS timings through
         * ath5k_hw_set_coverage_class make sure
         * we set them here */
        if (!ah->ah_coverage_class) {
                unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
                ath5k_hw_set_ifs_intervals(ah, slot_time);
        }

        return 0;
}