/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

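/*
 * Descriptive note (not in the original source): this table maps 802.11
 * QoS TIDs 0-7 onto the device's EDCA transmit FIFOs; entries 8-15 are
 * unused, and the final entry appears to cover the driver's internal
 * non-QoS TID.
 */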
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);


/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_device_cmd) * q->n_window;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	len = sizeof(struct iwl_device_cmd) * q->n_window;
	len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

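/*
 * Worked example (values assumed purely for illustration): with
 * n_bd = 256, n_window = 64, read_ptr = 0 and write_ptr = 5, s starts
 * at -5, the window correction brings it to 59, and the 2-entry reserve
 * leaves 57 usable slots reported to callers.
 */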
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);


/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);

	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len += IWL_MAX_SCAN_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* aggregation TX queues will get their ID when aggregation begins */
	if (txq_id <= IWL_TX_FIFO_AC3)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * handle building the REPLY_TX command
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;


	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int is_hcca)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;
	else if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
		RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/**
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
				info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up RTS and CTS flags for certain packets */
	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
		}
		break;
	default:
		break;
	}

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

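/*
 * Descriptive note (not in the original source): each outgoing frame is
 * described to the device by a TFD with up to two entries; the first
 * points at the Tx command plus the copied MAC header, the second at
 * the remaining frame payload, if any.
 */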
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all non-injected data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	if (info->flags & IEEE80211_TX_CTL_INJECTED)
		sta_id = priv->hw_params.bcast_sta_id;
	else
		sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	txq_id = skb_get_queue_mapping(skb);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop_unlock;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		goto drop_unlock;

	if (ieee80211_is_data_qos(fc))
		priv->stations[sta_id].tid[tid].tfds_in_queue++;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;


#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);


/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
		pci_unmap_addr(&txq->meta[cmd_idx], mapping),
		pci_unmap_len(&txq->meta[cmd_idx], len),
		PCI_DMA_BIDIRECTIONAL);

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		  txq_id, sequence,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_skb = rxb->skb;
		rxb->skb = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, rxb->skb);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

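/*
 * Descriptive note (not in the original source): in the block-ack
 * handling below, bit i of the shifted BA bitmap reports the fate of the
 * frame queued at index (start_idx + i) within the aggregation window,
 * which is how the per-frame ACK/NACK status is recovered for rate
 * scaling.
 */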
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count ; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */