/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
#include "iwl-core.h"

static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

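/*
 * Sizing note (illustrative): RX_QUEUE_SIZE is 256 and each RBD is a
 * single __le32 DMA address, so the descriptor ring allocated above is
 * 256 * 4 = 1 KiB of coherent memory; rb_stts is a separate, much
 * smaller status area that the device writes back into.
 */
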
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *	the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

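/*
 * Coalescing note (a sketch, assuming the usual 32-usec granularity of
 * this timer in the CSR definitions): the 2048-usec default written
 * above would correspond to a register value of 2048 / 32 = 0x40,
 * i.e. IWL_HOST_INT_TIMEOUT_DEF.
 */
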
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	unsigned long flags;

	/* If rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

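/*
 * Usage sketch (illustrative only): callers pair the two helpers above
 * for any coherent scratch area, e.g.
 *
 *	struct iwl_dma_ptr kw = {};
 *
 *	if (iwlagn_alloc_dma_ptr(trans, &kw, IWL_KW_SIZE))
 *		return -ENOMEM;
 *	... use kw.addr (CPU pointer) and kw.dma (bus address) ...
 *	iwlagn_free_dma_ptr(trans, &kw);
 *
 * which is exactly the pattern iwl_trans_tx_alloc() below uses for the
 * keep-warm buffer and the scheduler byte-count tables.
 */
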
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
					      GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kcalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non-allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

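/*
 * Why the BUILD_BUG_ON in iwl_trans_txq_init() matters (illustrative):
 * the ring-wrap helpers reduce "modulo ring size" to a bit mask, which
 * is only correct for power-of-two sizes. For a 256-entry ring,
 *
 *	next = (index + 1) & (256 - 1);
 *
 * wraps 255 back to 0, whereas with a non-power-of-two size such as
 * 250 the same mask would alias indexes 250..255 onto live slots.
 */
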
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;
	unsigned long flags;
	spinlock_t *lock;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans->shrd->cmd_queue) {
		dma_dir = DMA_BIDIRECTIONAL;
		lock = &trans->hcmd_lock;
	} else {
		dma_dir = DMA_TO_DEVICE;
		lock = &trans->shrd->sta_lock;
	}

	spin_lock_irqsave(lock, flags);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_irqrestore(lock, flags);
}

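/*
 * Drain example (illustrative): because iwl_queue_inc_wrap() wraps at
 * q->n_bd, the loop above handles a wrapped ring naturally -- with
 * n_bd = 256, read_ptr = 250 and write_ptr = 4 it frees entries
 * 250..255 and then 0..3 before the two pointers meet.
 */
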
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param trans
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	bus_apm_config(bus(trans));

	/* Configure analog phase-lock-loop before activating to D0A */
	if (cfg(trans)->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    cfg(trans)->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);

out:
	return ret;
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_nic_config(priv(trans));

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

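/*
 * Handshake summary (illustrative): iwl_prepare_card_hw() is a
 * two-step negotiation --
 *
 *	1. assert NIC_READY and poll for it (iwl_set_hw_ready());
 *	2. if that times out, assert PREPARE, wait for the device to
 *	   clear NIC_PREPARE_DONE, then retry step 1.
 *
 * A negative return from both attempts means the device never came up.
 */
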
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};

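/*
 * Mapping example (read straight from the tables above): a voice frame
 * (IEEE80211_AC_VO, index 0) on the BSS context goes to HW queue 0 and
 * FIFO IWL_TX_FIFO_VO, while on the PAN context it goes to HW queue 7
 * and FIFO IWL_TX_FIFO_VO_IPAN. The queue_to_fifo tables are the
 * inverse view, indexed by HW queue instead of by AC.
 */
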
static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(trans, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		iwl_set_hw_rfkill_state(priv(trans), true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under priv->shrd->lock and mac access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(trans, SCD_TXFACT, mask);
}

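/*
 * Mask sketch (illustrative): SCD_TXFACT carries one enable bit per
 * Tx DMA/FIFO channel, so iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7))
 * activates channels 0..7, while a mask of 0 -- as used in
 * iwl_tx_init() and iwl_trans_tx_stop() -- switches them all off.
 */
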
static void iwl_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
		sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(trans, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(trans,
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv(trans));

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
}

static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
		u8 sta_id, u8 tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 txq_id;
	bool is_agg = false;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = trans_pcie->mcast_queue[ctx];

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id =
			trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];

	/* aggregation is on for this <sta,tid> */
	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		WARN_ON(tid >= IWL_MAX_TID_COUNT);
		txq_id = trans_pcie->agg_txq[sta_id][tid];
		is_agg = true;
	}

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
	txq->cmd[q->write_ptr] = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

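	/*
	 * Padding example (illustrative): MAC header lengths are even, so
	 * the rounding above adds either 0 or 2 bytes -- e.g. len = 58
	 * gives firstlen = (58 + 3) & ~3 = 60, i.e. the 2 pad bytes that
	 * the MH_PAD flag below tells the device to skip.
	 */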
	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv(trans),
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq, "Queue is full");
		}
	}
	return 0;
}

static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto error;
	}

	iwl_apm_init(trans);

	return err;

error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}

76bc10fc | 1364 | static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, |
464021ff EG |
1365 | int txq_id, int ssn, u32 status, |
1366 | struct sk_buff_head *skbs) | |
1367 | { | |
8ad71bef EG |
1368 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1369 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | |
a0eaad71 EG |
1370 | /* n_bd is usually 256 => n_bd - 1 = 0xff */ |
1371 | int tfd_num = ssn & (txq->q.n_bd - 1); | |
464021ff | 1372 | int freed = 0; |
a0eaad71 | 1373 | |
8ad71bef EG |
1374 | txq->time_stamp = jiffies; |
1375 | ||
76bc10fc EG |
1376 | if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && |
1377 | txq_id != trans_pcie->agg_txq[sta_id][tid])) { | |
1378 | /* | |
1379 | * FIXME: this is a uCode bug which needs to be addressed; | |
1380 | * log the information and return for now. | |
1381 | * Since it can possibly happen very often, and in order | |
1382 | * not to fill the syslog, don't use IWL_ERR or IWL_WARN | |
1383 | */ | |
1384 | IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, " | |
1385 | "agg_txq[sta_id][tid] %d", txq_id, | |
1386 | trans_pcie->agg_txq[sta_id][tid]); | |
1387 | return 1; | |
a0eaad71 EG |
1388 | } |
1389 | ||
1390 | if (txq->q.read_ptr != tfd_num) { | |
1daf04b8 EG |
1391 | IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", |
1392 | txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, | |
1393 | tfd_num, ssn); | |
464021ff | 1394 | freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); |
1ba42da4 EG |
1395 | if (iwl_queue_space(&txq->q) > txq->q.low_mark && |
1396 | (!txq->sched_retry || | |
1397 | status != TX_STATUS_FAIL_PASSIVE_NO_RX)) | |
81a3de1c | 1398 | iwl_wake_queue(trans, txq, "Packets reclaimed"); |
a0eaad71 | 1399 | } |
76bc10fc | 1400 | return 0; |
a0eaad71 EG |
1401 | } |
1402 | ||
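/*
 * Editor's note: reclaim above maps the sequence number (ssn) to a
 * TFD ring index by masking with n_bd - 1, which is only equivalent
 * to a modulo because n_bd is a power of two (usually 256). A sketch
 * with a hypothetical name, for illustration only; e.g. with
 * n_bd = 256, ssn 0x123 maps to index 0x23.
 */
static inline int iwl_ssn_to_tfd_num_sketch(int ssn, int n_bd)
{
	return ssn & (n_bd - 1);	/* == ssn % n_bd when n_bd is 2^k */
}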
03905495 EG |
1403 | static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) |
1404 | { | |
1405 | iowrite8(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | |
1406 | } | |
1407 | ||
1408 | static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) | |
1409 | { | |
1410 | iowrite32(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | |
1411 | } | |
1412 | ||
1413 | static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) | |
1414 | { | |
1415 | u32 val = ioread32(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | |
1416 | return val; | |
1417 | } | |
1418 | ||
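/*
 * Editor's note: the three accessors above go straight through the
 * BAR0 mapping (trans_pcie->hw_base) created by pci_iomap() in
 * iwl_trans_pcie_alloc() at the bottom of this file; no locking or
 * ordering beyond the ioread/iowrite semantics is added here.
 */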
6d8f6eeb | 1419 | static void iwl_trans_pcie_free(struct iwl_trans *trans) |
34c1b7ba | 1420 | { |
a42a1844 EG |
1421 | struct iwl_trans_pcie *trans_pcie = |
1422 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1423 | ||
45c30dba | 1424 | iwl_calib_free_results(trans); |
ae2c30bf | 1425 | iwl_trans_pcie_tx_free(trans); |
a5916977 | 1426 | #ifndef CONFIG_IWLWIFI_IDI |
ae2c30bf | 1427 | iwl_trans_pcie_rx_free(trans); |
a5916977 | 1428 | #endif |
57a1dc89 EG |
1429 | if (trans_pcie->irq_requested) { |
1430 | free_irq(trans->irq, trans); | |
1431 | iwl_free_isr_ict(trans); | |
1432 | } | |
a42a1844 EG |
1433 | |
1434 | pci_disable_msi(trans_pcie->pci_dev); | |
1435 | pci_iounmap(trans_pcie->pci_dev, trans_pcie->hw_base); | |
1436 | pci_release_regions(trans_pcie->pci_dev); | |
1437 | pci_disable_device(trans_pcie->pci_dev); | |
1438 | ||
6d8f6eeb EG |
1439 | trans->shrd->trans = NULL; |
1440 | kfree(trans); | |
34c1b7ba EG |
1441 | } |
1442 | ||
c01a4047 | 1443 | #ifdef CONFIG_PM_SLEEP |
57210f7c EG |
1444 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) |
1445 | { | |
1446 | /* | |
1447 | * This function is called when the system goes into suspend state. | |
ade4c649 WYG |
1448 | * mac80211 will call iwlagn_mac_stop() from its suspend handler | |
1449 | * first, but since iwlagn_mac_stop() has no knowledge of who the | |
1450 | * caller is, | |
57210f7c EG |
1451 | * it will not call apm_ops.stop() to stop the DMA operation. | |
1452 | * Call apm_ops.stop() here to make sure the DMA is stopped. | |
1453 | * | |
1454 | * But of course ... if we have configured WoWLAN then we did other | |
1455 | * things already :-) | |
1456 | */ | |
d36120c6 | 1457 | if (!trans->shrd->wowlan) { |
57210f7c | 1458 | iwl_apm_stop(priv(trans)); |
d36120c6 JB |
1459 | } else { |
1460 | iwl_disable_interrupts(trans); | |
1042db2a | 1461 | iwl_clear_bit(trans, CSR_GP_CNTRL, |
d36120c6 JB |
1462 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
1463 | } | |
57210f7c EG |
1464 | |
1465 | return 0; | |
1466 | } | |
1467 | ||
1468 | static int iwl_trans_pcie_resume(struct iwl_trans *trans) | |
1469 | { | |
1470 | bool hw_rfkill = false; | |
1471 | ||
0c325769 | 1472 | iwl_enable_interrupts(trans); |
57210f7c | 1473 | |
1042db2a | 1474 | if (!(iwl_read32(trans, CSR_GP_CNTRL) & |
57210f7c EG |
1475 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) |
1476 | hw_rfkill = true; | |
1477 | ||
1478 | if (hw_rfkill) | |
1479 | set_bit(STATUS_RF_KILL_HW, &trans->shrd->status); | |
1480 | else | |
1481 | clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status); | |
1482 | ||
3e10caeb | 1483 | iwl_set_hw_rfkill_state(priv(trans), hw_rfkill); |
57210f7c EG |
1484 | |
1485 | return 0; | |
1486 | } | |
c01a4047 | 1487 | #endif /* CONFIG_PM_SLEEP */ |
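/*
 * Editor's note: in the resume path above the rfkill sense is
 * inverted: CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW being *clear* means
 * the hardware RF kill switch is engaged, which is why hw_rfkill is
 * set to true when the bit reads as 0.
 */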
57210f7c | 1488 | |
e13c0c59 | 1489 | static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans, |
81a3de1c EG |
1490 | enum iwl_rxon_context_id ctx, |
1491 | const char *msg) | |
e13c0c59 EG |
1492 | { |
1493 | u8 ac, txq_id; | |
1494 | struct iwl_trans_pcie *trans_pcie = | |
1495 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1496 | ||
1497 | for (ac = 0; ac < AC_NUM; ac++) { | |
1498 | txq_id = trans_pcie->ac_to_queue[ctx][ac]; | |
81a3de1c | 1499 | IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n", |
e13c0c59 | 1500 | ac, |
8ad71bef | 1501 | (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0) |
e13c0c59 | 1502 | ? "stopped" : "awake"); |
81a3de1c | 1503 | iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg); |
e13c0c59 EG |
1504 | } |
1505 | } | |
1506 | ||
81a3de1c EG |
1507 | static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id, |
1508 | const char *msg) | |
e20d4341 | 1509 | { |
8ad71bef EG |
1510 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1511 | ||
81a3de1c | 1512 | iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg); |
e20d4341 EG |
1513 | } |
1514 | ||
5f178cd2 EG |
1515 | #define IWL_FLUSH_WAIT_MS 2000 |
1516 | ||
1517 | static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) | |
1518 | { | |
8ad71bef | 1519 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
5f178cd2 EG |
1520 | struct iwl_tx_queue *txq; |
1521 | struct iwl_queue *q; | |
1522 | int cnt; | |
1523 | unsigned long now = jiffies; | |
1524 | int ret = 0; | |
1525 | ||
1526 | /* waiting for all the tx frames to complete might take a while */ | |
1527 | for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { | |
1528 | if (cnt == trans->shrd->cmd_queue) | |
1529 | continue; | |
8ad71bef | 1530 | txq = &trans_pcie->txq[cnt]; |
5f178cd2 EG |
1531 | q = &txq->q; |
1532 | while (q->read_ptr != q->write_ptr && !time_after(jiffies, | |
1533 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) | |
1534 | msleep(1); | |
1535 | ||
1536 | if (q->read_ptr != q->write_ptr) { | |
1537 | IWL_ERR(trans, "failed to flush all tx fifo queues\n"); | |
1538 | ret = -ETIMEDOUT; | |
1539 | break; | |
1540 | } | |
1541 | } | |
1542 | return ret; | |
1543 | } | |
1544 | ||
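/*
 * Editor's note: the flush wait above samples "now" once at entry,
 * so the IWL_FLUSH_WAIT_MS (2000 ms) deadline is shared across all
 * tx queues rather than restarting per queue: the total sleep time
 * is bounded by roughly 2 s, and the first queue still non-empty at
 * the deadline fails the whole flush with -ETIMEDOUT.
 */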
f22be624 EG |
1545 | /* |
1546 | * On every watchdog tick we check the (latest) time stamp; if it does not | |
1547 | * change during the timeout period and the queue is not empty, we reset the firmware. | |
1548 | */ | |
1549 | static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) | |
1550 | { | |
8ad71bef EG |
1551 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1552 | struct iwl_tx_queue *txq = &trans_pcie->txq[cnt]; | |
f22be624 EG |
1553 | struct iwl_queue *q = &txq->q; |
1554 | unsigned long timeout; | |
1555 | ||
1556 | if (q->read_ptr == q->write_ptr) { | |
1557 | txq->time_stamp = jiffies; | |
1558 | return 0; | |
1559 | } | |
1560 | ||
1561 | timeout = txq->time_stamp + | |
1562 | msecs_to_jiffies(hw_params(trans).wd_timeout); | |
1563 | ||
1564 | if (time_after(jiffies, timeout)) { | |
1565 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, | |
1566 | hw_params(trans).wd_timeout); | |
08d1700d | 1567 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", |
05f8a09f | 1568 | q->read_ptr, q->write_ptr); |
08d1700d | 1569 | IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", |
1042db2a | 1570 | iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) |
08d1700d | 1571 | & (TFD_QUEUE_SIZE_MAX - 1), |
1042db2a | 1572 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); |
f22be624 EG |
1573 | return 1; |
1574 | } | |
1575 | ||
1576 | return 0; | |
1577 | } | |
1578 | ||
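/*
 * Editor's note: the watchdog check above relies on time_after()
 * being safe across jiffies wraparound. The kernel's definition in
 * <linux/jiffies.h> is, in essence, a signed-difference compare:
 *
 *	#define time_after(a, b)	((long)((b) - (a)) < 0)
 *
 * so the "stuck" test stays correct even when jiffies wraps, as long
 * as the two stamps are less than LONG_MAX ticks apart.
 */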
ff620849 EG |
1579 | static const char *get_fh_string(int cmd) |
1580 | { | |
1581 | switch (cmd) { | |
1582 | IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); | |
1583 | IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); | |
1584 | IWL_CMD(FH_RSCSR_CHNL0_WPTR); | |
1585 | IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); | |
1586 | IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); | |
1587 | IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); | |
1588 | IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); | |
1589 | IWL_CMD(FH_TSSR_TX_STATUS_REG); | |
1590 | IWL_CMD(FH_TSSR_TX_ERROR_REG); | |
1591 | default: | |
1592 | return "UNKNOWN"; | |
1593 | } | |
1594 | } | |
1595 | ||
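/*
 * Editor's note: get_fh_string() above and get_csr_string() below
 * depend on the driver's IWL_CMD() stringification macro. Its usual
 * shape, shown here for reference only, is:
 *
 *	#define IWL_CMD(x)	case x: return #x
 *
 * i.e. each switch arm maps a register constant to its own name.
 */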
1596 | int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) | |
1597 | { | |
1598 | int i; | |
1599 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1600 | int pos = 0; | |
1601 | size_t bufsz = 0; | |
1602 | #endif | |
1603 | static const u32 fh_tbl[] = { | |
1604 | FH_RSCSR_CHNL0_STTS_WPTR_REG, | |
1605 | FH_RSCSR_CHNL0_RBDCB_BASE_REG, | |
1606 | FH_RSCSR_CHNL0_WPTR, | |
1607 | FH_MEM_RCSR_CHNL0_CONFIG_REG, | |
1608 | FH_MEM_RSSR_SHARED_CTRL_REG, | |
1609 | FH_MEM_RSSR_RX_STATUS_REG, | |
1610 | FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, | |
1611 | FH_TSSR_TX_STATUS_REG, | |
1612 | FH_TSSR_TX_ERROR_REG | |
1613 | }; | |
1614 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1615 | if (display) { | |
1616 | bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; | |
1617 | *buf = kmalloc(bufsz, GFP_KERNEL); | |
1618 | if (!*buf) | |
1619 | return -ENOMEM; | |
1620 | pos += scnprintf(*buf + pos, bufsz - pos, | |
1621 | "FH register values:\n"); | |
1622 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | |
1623 | pos += scnprintf(*buf + pos, bufsz - pos, | |
1624 | " %34s: 0X%08x\n", | |
1625 | get_fh_string(fh_tbl[i]), | |
1042db2a | 1626 | iwl_read_direct32(trans, fh_tbl[i])); |
ff620849 EG |
1627 | } |
1628 | return pos; | |
1629 | } | |
1630 | #endif | |
1631 | IWL_ERR(trans, "FH register values:\n"); | |
1632 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | |
1633 | IWL_ERR(trans, " %34s: 0X%08x\n", | |
1634 | get_fh_string(fh_tbl[i]), | |
1042db2a | 1635 | iwl_read_direct32(trans, fh_tbl[i])); |
ff620849 EG |
1636 | } |
1637 | return 0; | |
1638 | } | |
1639 | ||
1640 | static const char *get_csr_string(int cmd) | |
1641 | { | |
1642 | switch (cmd) { | |
1643 | IWL_CMD(CSR_HW_IF_CONFIG_REG); | |
1644 | IWL_CMD(CSR_INT_COALESCING); | |
1645 | IWL_CMD(CSR_INT); | |
1646 | IWL_CMD(CSR_INT_MASK); | |
1647 | IWL_CMD(CSR_FH_INT_STATUS); | |
1648 | IWL_CMD(CSR_GPIO_IN); | |
1649 | IWL_CMD(CSR_RESET); | |
1650 | IWL_CMD(CSR_GP_CNTRL); | |
1651 | IWL_CMD(CSR_HW_REV); | |
1652 | IWL_CMD(CSR_EEPROM_REG); | |
1653 | IWL_CMD(CSR_EEPROM_GP); | |
1654 | IWL_CMD(CSR_OTP_GP_REG); | |
1655 | IWL_CMD(CSR_GIO_REG); | |
1656 | IWL_CMD(CSR_GP_UCODE_REG); | |
1657 | IWL_CMD(CSR_GP_DRIVER_REG); | |
1658 | IWL_CMD(CSR_UCODE_DRV_GP1); | |
1659 | IWL_CMD(CSR_UCODE_DRV_GP2); | |
1660 | IWL_CMD(CSR_LED_REG); | |
1661 | IWL_CMD(CSR_DRAM_INT_TBL_REG); | |
1662 | IWL_CMD(CSR_GIO_CHICKEN_BITS); | |
1663 | IWL_CMD(CSR_ANA_PLL_CFG); | |
1664 | IWL_CMD(CSR_HW_REV_WA_REG); | |
1665 | IWL_CMD(CSR_DBG_HPET_MEM_REG); | |
1666 | default: | |
1667 | return "UNKNOWN"; | |
1668 | } | |
1669 | } | |
1670 | ||
1671 | void iwl_dump_csr(struct iwl_trans *trans) | |
1672 | { | |
1673 | int i; | |
1674 | static const u32 csr_tbl[] = { | |
1675 | CSR_HW_IF_CONFIG_REG, | |
1676 | CSR_INT_COALESCING, | |
1677 | CSR_INT, | |
1678 | CSR_INT_MASK, | |
1679 | CSR_FH_INT_STATUS, | |
1680 | CSR_GPIO_IN, | |
1681 | CSR_RESET, | |
1682 | CSR_GP_CNTRL, | |
1683 | CSR_HW_REV, | |
1684 | CSR_EEPROM_REG, | |
1685 | CSR_EEPROM_GP, | |
1686 | CSR_OTP_GP_REG, | |
1687 | CSR_GIO_REG, | |
1688 | CSR_GP_UCODE_REG, | |
1689 | CSR_GP_DRIVER_REG, | |
1690 | CSR_UCODE_DRV_GP1, | |
1691 | CSR_UCODE_DRV_GP2, | |
1692 | CSR_LED_REG, | |
1693 | CSR_DRAM_INT_TBL_REG, | |
1694 | CSR_GIO_CHICKEN_BITS, | |
1695 | CSR_ANA_PLL_CFG, | |
1696 | CSR_HW_REV_WA_REG, | |
1697 | CSR_DBG_HPET_MEM_REG | |
1698 | }; | |
1699 | IWL_ERR(trans, "CSR values:\n"); | |
1700 | IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " | |
1701 | "CSR_INT_PERIODIC_REG)\n"); | |
1702 | for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { | |
1703 | IWL_ERR(trans, " %25s: 0X%08x\n", | |
1704 | get_csr_string(csr_tbl[i]), | |
1042db2a | 1705 | iwl_read32(trans, csr_tbl[i])); |
ff620849 EG |
1706 | } |
1707 | } | |
1708 | ||
87e5666c EG |
1709 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
1710 | /* creation and removal of debugfs files */ | |
1711 | #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ | |
5a878bf6 | 1712 | if (!debugfs_create_file(#name, mode, parent, trans, \ |
87e5666c EG |
1713 | &iwl_dbgfs_##name##_ops)) \ |
1714 | return -ENOMEM; \ | |
1715 | } while (0) | |
1716 | ||
1717 | /* file operations */ | |
1718 | #define DEBUGFS_READ_FUNC(name) \ | |
1719 | static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ | |
1720 | char __user *user_buf, \ | |
1721 | size_t count, loff_t *ppos); | |
1722 | ||
1723 | #define DEBUGFS_WRITE_FUNC(name) \ | |
1724 | static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ | |
1725 | const char __user *user_buf, \ | |
1726 | size_t count, loff_t *ppos); | |
1727 | ||
1728 | ||
1729 | static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file) | |
1730 | { | |
1731 | file->private_data = inode->i_private; | |
1732 | return 0; | |
1733 | } | |
1734 | ||
1735 | #define DEBUGFS_READ_FILE_OPS(name) \ | |
1736 | DEBUGFS_READ_FUNC(name); \ | |
1737 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1738 | .read = iwl_dbgfs_##name##_read, \ | |
1739 | .open = iwl_dbgfs_open_file_generic, \ | |
1740 | .llseek = generic_file_llseek, \ | |
1741 | }; | |
1742 | ||
16db88ba EG |
1743 | #define DEBUGFS_WRITE_FILE_OPS(name) \ |
1744 | DEBUGFS_WRITE_FUNC(name); \ | |
1745 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1746 | .write = iwl_dbgfs_##name##_write, \ | |
1747 | .open = iwl_dbgfs_open_file_generic, \ | |
1748 | .llseek = generic_file_llseek, \ | |
1749 | }; | |
1750 | ||
87e5666c EG |
1751 | #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ |
1752 | DEBUGFS_READ_FUNC(name); \ | |
1753 | DEBUGFS_WRITE_FUNC(name); \ | |
1754 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1755 | .write = iwl_dbgfs_##name##_write, \ | |
1756 | .read = iwl_dbgfs_##name##_read, \ | |
1757 | .open = iwl_dbgfs_open_file_generic, \ | |
1758 | .llseek = generic_file_llseek, \ | |
1759 | }; | |
1760 | ||
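/*
 * Editor's note: for reference, DEBUGFS_READ_FILE_OPS(rx_queue)
 * expands (per the macros above) to roughly:
 *
 *	static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 *					char __user *user_buf,
 *					size_t count, loff_t *ppos);
 *	static const struct file_operations iwl_dbgfs_rx_queue_ops = {
 *		.read = iwl_dbgfs_rx_queue_read,
 *		.open = iwl_dbgfs_open_file_generic,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * one fops table per debugfs file, wired to name-mangled handlers.
 */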
87e5666c EG |
1761 | static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, |
1762 | char __user *user_buf, | |
8ad71bef EG |
1763 | size_t count, loff_t *ppos) |
1764 | { | |
5a878bf6 | 1765 | struct iwl_trans *trans = file->private_data; |
8ad71bef | 1766 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
87e5666c EG |
1767 | struct iwl_tx_queue *txq; |
1768 | struct iwl_queue *q; | |
1769 | char *buf; | |
1770 | int pos = 0; | |
1771 | int cnt; | |
1772 | int ret; | |
fd656935 | 1773 | const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num; |
87e5666c | 1774 | |
8ad71bef | 1775 | if (!trans_pcie->txq) { |
3e10caeb | 1776 | IWL_ERR(trans, "txq not ready\n"); |
87e5666c EG |
1777 | return -EAGAIN; |
1778 | } | |
1779 | buf = kzalloc(bufsz, GFP_KERNEL); | |
1780 | if (!buf) | |
1781 | return -ENOMEM; | |
1782 | ||
5a878bf6 | 1783 | for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { |
8ad71bef | 1784 | txq = &trans_pcie->txq[cnt]; |
87e5666c EG |
1785 | q = &txq->q; |
1786 | pos += scnprintf(buf + pos, bufsz - pos, | |
1787 | "hwq %.2d: read=%u write=%u stop=%d" | |
1788 | " swq_id=%#.2x (ac %d/hwq %d)\n", | |
1789 | cnt, q->read_ptr, q->write_ptr, | |
8ad71bef | 1790 | !!test_bit(cnt, trans_pcie->queue_stopped), |
87e5666c EG |
1791 | txq->swq_id, txq->swq_id & 3, |
1792 | (txq->swq_id >> 2) & 0x1f); | |
1793 | if (cnt >= 4) | |
1794 | continue; | |
1795 | /* for the ACs, display the stop count too */ | |
1796 | pos += scnprintf(buf + pos, bufsz - pos, | |
8ad71bef EG |
1797 | " stop-count: %d\n", |
1798 | atomic_read(&trans_pcie->queue_stop_count[cnt])); | |
87e5666c EG |
1799 | } |
1800 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1801 | kfree(buf); | |
1802 | return ret; | |
1803 | } | |
1804 | ||
1805 | static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, | |
1806 | char __user *user_buf, | |
1807 | size_t count, loff_t *ppos) { | |
5a878bf6 EG |
1808 | struct iwl_trans *trans = file->private_data; |
1809 | struct iwl_trans_pcie *trans_pcie = | |
1810 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1811 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | |
87e5666c EG |
1812 | char buf[256]; |
1813 | int pos = 0; | |
1814 | const size_t bufsz = sizeof(buf); | |
1815 | ||
1816 | pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", | |
1817 | rxq->read); | |
1818 | pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", | |
1819 | rxq->write); | |
1820 | pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", | |
1821 | rxq->free_count); | |
1822 | if (rxq->rb_stts) { | |
1823 | pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", | |
1824 | le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); | |
1825 | } else { | |
1826 | pos += scnprintf(buf + pos, bufsz - pos, | |
1827 | "closed_rb_num: Not Allocated\n"); | |
1828 | } | |
1829 | return simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1830 | } | |
1831 | ||
7ff94706 EG |
1832 | static ssize_t iwl_dbgfs_log_event_read(struct file *file, |
1833 | char __user *user_buf, | |
1834 | size_t count, loff_t *ppos) | |
1835 | { | |
1836 | struct iwl_trans *trans = file->private_data; | |
1837 | char *buf; | |
1838 | int pos = 0; | |
1839 | ssize_t ret = -ENOMEM; | |
1840 | ||
6bb78847 | 1841 | ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true); |
7ff94706 EG |
1842 | if (buf) { |
1843 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1844 | kfree(buf); | |
1845 | } | |
1846 | return ret; | |
1847 | } | |
1848 | ||
1849 | static ssize_t iwl_dbgfs_log_event_write(struct file *file, | |
1850 | const char __user *user_buf, | |
1851 | size_t count, loff_t *ppos) | |
1852 | { | |
1853 | struct iwl_trans *trans = file->private_data; | |
1854 | u32 event_log_flag; | |
1855 | char buf[8]; | |
1856 | int buf_size; | |
1857 | ||
1858 | memset(buf, 0, sizeof(buf)); | |
1859 | buf_size = min(count, sizeof(buf) - 1); | |
1860 | if (copy_from_user(buf, user_buf, buf_size)) | |
1861 | return -EFAULT; | |
1862 | if (sscanf(buf, "%u", &event_log_flag) != 1) | |
1863 | return -EFAULT; | |
1864 | if (event_log_flag == 1) | |
6bb78847 | 1865 | iwl_dump_nic_event_log(trans, true, NULL, false); |
7ff94706 EG |
1866 | |
1867 | return count; | |
1868 | } | |
1869 | ||
1f7b6172 EG |
1870 | static ssize_t iwl_dbgfs_interrupt_read(struct file *file, |
1871 | char __user *user_buf, | |
1872 | size_t count, loff_t *ppos) { | |
1873 | ||
1874 | struct iwl_trans *trans = file->private_data; | |
1875 | struct iwl_trans_pcie *trans_pcie = | |
1876 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1877 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
1878 | ||
1879 | int pos = 0; | |
1880 | char *buf; | |
1881 | int bufsz = 24 * 64; /* 24 items * 64 char per item */ | |
1882 | ssize_t ret; | |
1883 | ||
1884 | buf = kzalloc(bufsz, GFP_KERNEL); | |
1885 | if (!buf) { | |
1886 | IWL_ERR(trans, "Cannot allocate buffer\n"); | |
1887 | return -ENOMEM; | |
1888 | } | |
1889 | ||
1890 | pos += scnprintf(buf + pos, bufsz - pos, | |
1891 | "Interrupt Statistics Report:\n"); | |
1892 | ||
1893 | pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", | |
1894 | isr_stats->hw); | |
1895 | pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", | |
1896 | isr_stats->sw); | |
1897 | if (isr_stats->sw || isr_stats->hw) { | |
1898 | pos += scnprintf(buf + pos, bufsz - pos, | |
1899 | "\tLast Restarting Code: 0x%X\n", | |
1900 | isr_stats->err_code); | |
1901 | } | |
1902 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1903 | pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", | |
1904 | isr_stats->sch); | |
1905 | pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", | |
1906 | isr_stats->alive); | |
1907 | #endif | |
1908 | pos += scnprintf(buf + pos, bufsz - pos, | |
1909 | "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); | |
1910 | ||
1911 | pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", | |
1912 | isr_stats->ctkill); | |
1913 | ||
1914 | pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", | |
1915 | isr_stats->wakeup); | |
1916 | ||
1917 | pos += scnprintf(buf + pos, bufsz - pos, | |
1918 | "Rx command responses:\t\t %u\n", isr_stats->rx); | |
1919 | ||
1920 | pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", | |
1921 | isr_stats->tx); | |
1922 | ||
1923 | pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", | |
1924 | isr_stats->unhandled); | |
1925 | ||
1926 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1927 | kfree(buf); | |
1928 | return ret; | |
1929 | } | |
1930 | ||
1931 | static ssize_t iwl_dbgfs_interrupt_write(struct file *file, | |
1932 | const char __user *user_buf, | |
1933 | size_t count, loff_t *ppos) | |
1934 | { | |
1935 | struct iwl_trans *trans = file->private_data; | |
1936 | struct iwl_trans_pcie *trans_pcie = | |
1937 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1938 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
1939 | ||
1940 | char buf[8]; | |
1941 | int buf_size; | |
1942 | u32 reset_flag; | |
1943 | ||
1944 | memset(buf, 0, sizeof(buf)); | |
1945 | buf_size = min(count, sizeof(buf) - 1); | |
1946 | if (copy_from_user(buf, user_buf, buf_size)) | |
1947 | return -EFAULT; | |
1948 | if (sscanf(buf, "%x", &reset_flag) != 1) | |
1949 | return -EFAULT; | |
1950 | if (reset_flag == 0) | |
1951 | memset(isr_stats, 0, sizeof(*isr_stats)); | |
1952 | ||
1953 | return count; | |
1954 | } | |
1955 | ||
16db88ba EG |
1956 | static ssize_t iwl_dbgfs_csr_write(struct file *file, |
1957 | const char __user *user_buf, | |
1958 | size_t count, loff_t *ppos) | |
1959 | { | |
1960 | struct iwl_trans *trans = file->private_data; | |
1961 | char buf[8]; | |
1962 | int buf_size; | |
1963 | int csr; | |
1964 | ||
1965 | memset(buf, 0, sizeof(buf)); | |
1966 | buf_size = min(count, sizeof(buf) - 1); | |
1967 | if (copy_from_user(buf, user_buf, buf_size)) | |
1968 | return -EFAULT; | |
1969 | if (sscanf(buf, "%d", &csr) != 1) | |
1970 | return -EFAULT; | |
1971 | ||
1972 | iwl_dump_csr(trans); | |
1973 | ||
1974 | return count; | |
1975 | } | |
1976 | ||
16db88ba EG |
1977 | static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, |
1978 | char __user *user_buf, | |
1979 | size_t count, loff_t *ppos) | |
1980 | { | |
1981 | struct iwl_trans *trans = file->private_data; | |
1982 | char *buf; | |
1983 | int pos = 0; | |
1984 | ssize_t ret = -EFAULT; | |
1985 | ||
1986 | ret = pos = iwl_dump_fh(trans, &buf, true); | |
1987 | if (buf) { | |
1988 | ret = simple_read_from_buffer(user_buf, | |
1989 | count, ppos, buf, pos); | |
1990 | kfree(buf); | |
1991 | } | |
1992 | ||
1993 | return ret; | |
1994 | } | |
1995 | ||
7ff94706 | 1996 | DEBUGFS_READ_WRITE_FILE_OPS(log_event); |
1f7b6172 | 1997 | DEBUGFS_READ_WRITE_FILE_OPS(interrupt); |
16db88ba | 1998 | DEBUGFS_READ_FILE_OPS(fh_reg); |
87e5666c EG |
1999 | DEBUGFS_READ_FILE_OPS(rx_queue); |
2000 | DEBUGFS_READ_FILE_OPS(tx_queue); | |
16db88ba | 2001 | DEBUGFS_WRITE_FILE_OPS(csr); |
87e5666c EG |
2002 | |
2003 | /* | |
2004 | * Create the debugfs files and directories | |
2006 | */ | |
2007 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | |
2008 | struct dentry *dir) | |
2009 | { | |
87e5666c EG |
2010 | DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); |
2011 | DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); | |
7ff94706 | 2012 | DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR); |
1f7b6172 | 2013 | DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); |
16db88ba EG |
2014 | DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); |
2015 | DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); | |
87e5666c EG |
2016 | return 0; |
2017 | } | |
2018 | #else | |
2019 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | |
2020 | struct dentry *dir) | |
2021 | { return 0; } | |
2022 | ||
2023 | #endif /*CONFIG_IWLWIFI_DEBUGFS */ | |
2024 | ||
e6bb4c9c | 2025 | const struct iwl_trans_ops trans_ops_pcie = { |
57a1dc89 | 2026 | .start_hw = iwl_trans_pcie_start_hw, |
ed6a3803 | 2027 | .fw_alive = iwl_trans_pcie_fw_alive, |
e6bb4c9c | 2028 | .start_device = iwl_trans_pcie_start_device, |
e6bb4c9c | 2029 | .stop_device = iwl_trans_pcie_stop_device, |
48d42c42 | 2030 | |
e13c0c59 | 2031 | .wake_any_queue = iwl_trans_pcie_wake_any_queue, |
48d42c42 | 2032 | |
e6bb4c9c | 2033 | .send_cmd = iwl_trans_pcie_send_cmd, |
c85eb619 | 2034 | |
e6bb4c9c | 2035 | .tx = iwl_trans_pcie_tx, |
a0eaad71 | 2036 | .reclaim = iwl_trans_pcie_reclaim, |
34c1b7ba | 2037 | |
7f01d567 | 2038 | .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, |
288712a6 | 2039 | .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc, |
c91bd124 | 2040 | .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, |
34c1b7ba | 2041 | |
e6bb4c9c | 2042 | .kick_nic = iwl_trans_pcie_kick_nic, |
1e89cbac | 2043 | |
e6bb4c9c | 2044 | .free = iwl_trans_pcie_free, |
e20d4341 | 2045 | .stop_queue = iwl_trans_pcie_stop_queue, |
87e5666c EG |
2046 | |
2047 | .dbgfs_register = iwl_trans_pcie_dbgfs_register, | |
5f178cd2 EG |
2048 | |
2049 | .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, | |
f22be624 | 2050 | .check_stuck_queue = iwl_trans_pcie_check_stuck_queue, |
5f178cd2 | 2051 | |
c01a4047 | 2052 | #ifdef CONFIG_PM_SLEEP |
57210f7c EG |
2053 | .suspend = iwl_trans_pcie_suspend, |
2054 | .resume = iwl_trans_pcie_resume, | |
c01a4047 | 2055 | #endif |
03905495 EG |
2056 | .write8 = iwl_trans_pcie_write8, |
2057 | .write32 = iwl_trans_pcie_write32, | |
2058 | .read32 = iwl_trans_pcie_read32, | |
e6bb4c9c | 2059 | }; |
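/*
 * Editor's note: callers reach the handlers above through this ops
 * table rather than by direct call. A sketch of the kind of inline
 * wrapper the transport API provides (the wrapper name here is
 * hypothetical; the real ones live in iwl-trans.h):
 */
static inline void iwl_trans_kick_nic_sketch(struct iwl_trans *trans)
{
	/* dispatch through the per-transport vtable set up in alloc */
	trans->ops->kick_nic(trans);
}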
a42a1844 | 2060 | |
a42a1844 EG |
2061 | /* PCI registers */ |
2062 | #define PCI_CFG_RETRY_TIMEOUT 0x041 | |
2063 | ||
2064 | struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, | |
2065 | struct pci_dev *pdev, | |
2066 | const struct pci_device_id *ent) | |
2067 | { | |
a42a1844 EG |
2068 | struct iwl_trans_pcie *trans_pcie; |
2069 | struct iwl_trans *trans; | |
2070 | u16 pci_cmd; | |
2071 | int err; | |
2072 | ||
2073 | trans = kzalloc(sizeof(struct iwl_trans) + | |
2074 | sizeof(struct iwl_trans_pcie), GFP_KERNEL); | |
2075 | ||
2076 | if (WARN_ON(!trans)) | |
2077 | return NULL; | |
2078 | ||
2079 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
2080 | ||
2081 | trans->ops = &trans_ops_pcie; | |
2082 | trans->shrd = shrd; | |
2083 | trans_pcie->trans = trans; | |
2084 | spin_lock_init(&trans->hcmd_lock); | |
2085 | ||
2086 | /* W/A - seems to solve weird behavior. We need to remove this if we | |
2087 | * don't want to stay in L1 all the time. This wastes a lot of power */ | |
2088 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | |
2089 | PCIE_LINK_STATE_CLKPM); | |
2090 | ||
2091 | if (pci_enable_device(pdev)) { | |
2092 | err = -ENODEV; | |
2093 | goto out_no_pci; | |
2094 | } | |
2095 | ||
2096 | pci_set_master(pdev); | |
2097 | ||
2098 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | |
2099 | if (!err) | |
2100 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); | |
2101 | if (err) { | |
2102 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2103 | if (!err) | |
2104 | err = pci_set_consistent_dma_mask(pdev, | |
2105 | DMA_BIT_MASK(32)); | |
2106 | /* both attempts failed: */ | |
2107 | if (err) { | |
2108 | dev_printk(KERN_ERR, &pdev->dev, | |
2109 | "No suitable DMA available.\n"); | |
2110 | goto out_pci_disable_device; | |
2111 | } | |
2112 | } | |
2113 | ||
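	/*
	 * Editor's note: DMA_BIT_MASK(n) from <linux/dma-mapping.h> is,
	 * in essence:
	 *
	 *	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
	 *
	 * so the fallback above first requests 36-bit DMA addressing and,
	 * only if the platform rejects it, retries with the 32-bit mask.
	 */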
2114 | err = pci_request_regions(pdev, DRV_NAME); | |
2115 | if (err) { | |
2116 | dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed"); | |
2117 | goto out_pci_disable_device; | |
2118 | } | |
2119 | ||
2120 | trans_pcie->hw_base = pci_iomap(pdev, 0, 0); | |
2121 | if (!trans_pcie->hw_base) { | |
2122 | dev_printk(KERN_ERR, &pdev->dev, "pci_iomap failed"); | |
2123 | err = -ENODEV; | |
2124 | goto out_pci_release_regions; | |
2125 | } | |
2126 | ||
a42a1844 EG |
2127 | dev_printk(KERN_INFO, &pdev->dev, |
2128 | "pci_resource_len = 0x%08llx\n", | |
2129 | (unsigned long long) pci_resource_len(pdev, 0)); | |
2130 | dev_printk(KERN_INFO, &pdev->dev, | |
2131 | "pci_resource_base = %p\n", trans_pcie->hw_base); | |
2132 | ||
2133 | dev_printk(KERN_INFO, &pdev->dev, | |
2134 | "HW Revision ID = 0x%X\n", pdev->revision); | |
2135 | ||
2136 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | |
2137 | * PCI Tx retries from interfering with C3 CPU state */ | |
2138 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | |
2139 | ||
2140 | err = pci_enable_msi(pdev); | |
2141 | if (err) | |
2142 | dev_printk(KERN_ERR, &pdev->dev, | |
2143 | "pci_enable_msi failed(0X%x)", err); | |
2144 | ||
2145 | trans->dev = &pdev->dev; | |
2146 | trans->irq = pdev->irq; | |
2147 | trans_pcie->pci_dev = pdev; | |
2148 | ||
2149 | /* TODO: Move this away, not needed if not MSI */ | |
2150 | /* enable rfkill interrupt: hw bug w/a */ | |
2151 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); | |
2152 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { | |
2153 | pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; | |
2154 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | |
2155 | } | |
2156 | ||
2157 | return trans; | |
2158 | ||
2159 | out_pci_release_regions: | |
2160 | pci_release_regions(pdev); | |
2161 | out_pci_disable_device: | |
2162 | pci_disable_device(pdev); | |
2163 | out_no_pci: | |
2164 | kfree(trans); | |
2165 | return NULL; | |
2166 | } | |
2167 |
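/*
 * Editor's note: a hedged sketch of how a PCI probe callback might
 * consume iwl_trans_pcie_alloc(); everything here except the alloc
 * call itself (the probe name, where shrd comes from, the error
 * handling) is an illustrative assumption, not the driver's actual
 * probe path.
 */
static int iwl_pci_probe_sketch(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct iwl_shared *shrd = iwl_shared_from_ent(ent);	/* hypothetical */
	struct iwl_trans *trans = iwl_trans_pcie_alloc(shrd, pdev, ent);

	if (!trans)
		return -ENOMEM;

	/* ... register the transport with the op_mode / upper layers ... */
	return 0;
}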