iwlwifi: continue clean up - pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/trans.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

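/*
 * Bitmask with one bit set per HW TX queue, excluding the command
 * queue; used when programming SCD_QUEUECHAIN_SEL.
 */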
#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	 (~(1<<(trans_pcie)->cmd_queue)))

static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

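/*
 * Per-queue watchdog handler: fires when a non-empty TX queue has made
 * no progress for wd_timeout jiffies. Dumps scheduler and FH state,
 * then hands the error to the op_mode.
 */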
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_read_targ_mem(trans,
					  trans_pcie->scd_base_addr +
					  SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
		struct iwl_tx_cmd *tx_cmd =
			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			get_unaligned_le32(&tx_cmd->scratch));
	}

	iwl_op_mode_nic_error(trans->op_mode);
}

static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
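	/*
	 * Illustration (assumption, not from this file): with a
	 * power-of-two size, the wrap helpers can reduce to a mask, e.g.
	 *	next = (index + 1) & (TFD_QUEUE_SIZE_MAX - 1);
	 * which is only correct when TFD_QUEUE_SIZE_MAX is 2^n.
	 */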

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address
	 */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].copy_cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT		0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

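/*
 * iwl_pcie_prepare_card_hw: ask the device (which may be owned by the
 * AMT/ME firmware) to hand control to the driver, retrying the
 * NIC_READY handshake in 200 usec steps until the 150000 usec budget
 * below is exhausted.
 */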
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		ret = iwl_pcie_set_hw_ready(trans);
		if (ret >= 0)
			return 0;

		usleep_range(200, 1000);
		t += 200;
	} while (t < 150000);

	return ret;
}

/*
 * ucode
 */
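/*
 * Load one firmware chunk: program the FH service-channel DMA with the
 * host buffer and destination SRAM address, then wait (up to 5 s) for
 * the ISR to flag ucode_write_complete.
 */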
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
	if (!v_addr)
		return -ENOMEM;

	for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
		u32 copy_size;

		copy_size = min_t(u32, PAGE_SIZE, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans,
						   section->offset + offset,
						   p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
	return ret;
}

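/*
 * A firmware image is an array of sections; each section is bounced
 * through the PAGE_SIZE coherent buffer above, one chunk at a time.
 */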
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int i, ret = 0;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].data)
			break;

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}

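/*
 * Bring up the scheduler: zero its SRAM context and status areas,
 * point it at the byte-count tables, activate the command queue and
 * enable all FH TX DMA channels.
 */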
static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	int chan;
	u32 reg_val;

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_tx_start(trans, scd_addr);
}

/*
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans,
			 "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

998
43e58856 999static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
ae2c30bf 1000{
43e58856 1001 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
20d3b647 1002 unsigned long flags;
ae2c30bf 1003
43e58856 1004 /* tell the device to stop sending interrupts */
7b11488f 1005 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
ae2c30bf 1006 iwl_disable_interrupts(trans);
7b11488f 1007 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
ae2c30bf 1008
ab6cf8e8 1009 /* device going down, Stop using ICT table */
990aa6d7 1010 iwl_pcie_disable_ict(trans);
ab6cf8e8
EG
1011
1012 /*
1013 * If a HW restart happens during firmware loading,
1014 * then the firmware loading might call this function
1015 * and later it might be called again due to the
1016 * restart. So don't process again if the device is
1017 * already dead.
1018 */
83626404 1019 if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
6d8f6eeb 1020 iwl_trans_tx_stop(trans);
9805c446 1021 iwl_pcie_rx_stop(trans);
6379103e 1022
ab6cf8e8 1023 /* Power-down device's busmaster DMA clocks */
1042db2a 1024 iwl_write_prph(trans, APMG_CLK_DIS_REG,
ab6cf8e8
EG
1025 APMG_CLK_VAL_DMA_CLK_RQT);
1026 udelay(5);
1027 }
1028
1029 /* Make sure (redundant) we've released our request to stay awake */
1042db2a 1030 iwl_clear_bit(trans, CSR_GP_CNTRL,
20d3b647 1031 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ab6cf8e8
EG
1032
1033 /* Stop the device, and put it in low power state */
7afe3705 1034 iwl_pcie_apm_stop(trans);
43e58856
EG
1035
1036 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1037 * Clean again the interrupt here
1038 */
7b11488f 1039 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
43e58856 1040 iwl_disable_interrupts(trans);
7b11488f 1041 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
43e58856 1042
218733cf
EG
1043 iwl_enable_rfkill_int(trans);
1044
43e58856 1045 /* wait to make sure we flush pending tasklet*/
75595536 1046 synchronize_irq(trans_pcie->irq);
43e58856
EG
1047 tasklet_kill(&trans_pcie->irq_tasklet);
1048
1ee158d8
JB
1049 cancel_work_sync(&trans_pcie->rx_replenish);
1050
43e58856 1051 /* stop and reset the on-board processor */
1042db2a 1052 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
74fda971
DF
1053
1054 /* clear all status bits */
1055 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
1056 clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
1057 clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
01d651d4 1058 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
f946b529 1059 clear_bit(STATUS_RFKILL, &trans_pcie->status);
ab6cf8e8
EG
1060}
1061
2dd4f9f7
JB
1062static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
1063{
1064 /* let the ucode operate on its own */
1065 iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
1066 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
1067
1068 iwl_disable_interrupts(trans);
1069 iwl_clear_bit(trans, CSR_GP_CNTRL,
1070 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1071}
1072
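/*
 * Data path: each frame becomes (at most) two TBs of one TFD - the
 * first maps the TX command plus the MAC header (padded to a dword
 * boundary), the second the remainder of the skb payload.
 */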
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
		  ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwl_pcie_tx_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwl_pcie_tx_build_tfd(trans, txq, phys_addr, secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_pcie_tasklet, (unsigned long)trans);

		iwl_pcie_alloc_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
				  IRQF_SHARED, DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		trans_pcie->irq_requested = true;
	}

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		goto err_free_irq;
	}

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	trans_pcie->irq_requested = false;
	free_irq(trans_pcie->irq, trans);
error:
	iwl_pcie_free_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}

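/*
 * stop_hw mirrors start_hw: quiesce interrupts and the APM; unless the
 * op_mode is going away, re-arm the RF-kill interrupt and re-check a
 * switch state that may have flipped while interrupts were disabled.
 */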
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);

	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}

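/*
 * Illustrative op_mode-side usage (hypothetical values, not taken from
 * this file):
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.cmd_queue = 9,
 *		.rx_buf_size_8k = false,
 *		.queue_watchdog_timeout = 2000,
 *	};
 *	iwl_trans_configure(trans, &trans_cfg);
 *
 * queue_watchdog_timeout is in milliseconds (converted to jiffies above).
 */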
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->irq_requested) {
		free_irq(trans_pcie->irq, trans);
		iwl_pcie_free_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}

1532
1533static const char *get_csr_string(int cmd)
1534{
d9fb6465 1535#define IWL_CMD(x) case x: return #x
ff620849
EG
1536 switch (cmd) {
1537 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1538 IWL_CMD(CSR_INT_COALESCING);
1539 IWL_CMD(CSR_INT);
1540 IWL_CMD(CSR_INT_MASK);
1541 IWL_CMD(CSR_FH_INT_STATUS);
1542 IWL_CMD(CSR_GPIO_IN);
1543 IWL_CMD(CSR_RESET);
1544 IWL_CMD(CSR_GP_CNTRL);
1545 IWL_CMD(CSR_HW_REV);
1546 IWL_CMD(CSR_EEPROM_REG);
1547 IWL_CMD(CSR_EEPROM_GP);
1548 IWL_CMD(CSR_OTP_GP_REG);
1549 IWL_CMD(CSR_GIO_REG);
1550 IWL_CMD(CSR_GP_UCODE_REG);
1551 IWL_CMD(CSR_GP_DRIVER_REG);
1552 IWL_CMD(CSR_UCODE_DRV_GP1);
1553 IWL_CMD(CSR_UCODE_DRV_GP2);
1554 IWL_CMD(CSR_LED_REG);
1555 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1556 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1557 IWL_CMD(CSR_ANA_PLL_CFG);
1558 IWL_CMD(CSR_HW_REV_WA_REG);
1559 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1560 default:
1561 return "UNKNOWN";
1562 }
d9fb6465 1563#undef IWL_CMD
ff620849
EG
1564}
1565
990aa6d7 1566void iwl_pcie_dump_csr(struct iwl_trans *trans)
ff620849
EG
1567{
1568 int i;
1569 static const u32 csr_tbl[] = {
1570 CSR_HW_IF_CONFIG_REG,
1571 CSR_INT_COALESCING,
1572 CSR_INT,
1573 CSR_INT_MASK,
1574 CSR_FH_INT_STATUS,
1575 CSR_GPIO_IN,
1576 CSR_RESET,
1577 CSR_GP_CNTRL,
1578 CSR_HW_REV,
1579 CSR_EEPROM_REG,
1580 CSR_EEPROM_GP,
1581 CSR_OTP_GP_REG,
1582 CSR_GIO_REG,
1583 CSR_GP_UCODE_REG,
1584 CSR_GP_DRIVER_REG,
1585 CSR_UCODE_DRV_GP1,
1586 CSR_UCODE_DRV_GP2,
1587 CSR_LED_REG,
1588 CSR_DRAM_INT_TBL_REG,
1589 CSR_GIO_CHICKEN_BITS,
1590 CSR_ANA_PLL_CFG,
1591 CSR_HW_REV_WA_REG,
1592 CSR_DBG_HPET_MEM_REG
1593 };
1594 IWL_ERR(trans, "CSR values:\n");
1595 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1596 "CSR_INT_PERIODIC_REG)\n");
1597 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1598 IWL_ERR(trans, " %25s: 0X%08x\n",
1599 get_csr_string(csr_tbl[i]),
1042db2a 1600 iwl_read32(trans, csr_tbl[i]));
ff620849
EG
1601 }
1602}
1603
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
				       char __user *user_buf,		\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

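/*
 * Illustrative only: DEBUGFS_READ_WRITE_FILE_OPS(interrupt), used further
 * below, expands (modulo whitespace) to forward declarations of the two
 * handlers plus a file_operations table wired to them:
 */
#if 0
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos);
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos);
static const struct file_operations iwl_dbgfs_interrupt_ops = {
	.write = iwl_dbgfs_interrupt_write,
	.read = iwl_dbgfs_interrupt_read,
	.open = simple_open,
	.llseek = generic_file_llseek,
};
#endif
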
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

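/*
 * Illustrative only: writing the hex value 0 to this debugfs file clears
 * the interrupt statistics. A hypothetical userspace sketch (the exact
 * path under /sys/kernel/debug depends on where the op mode registered
 * the directory, so it is an assumption here):
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void clear_iwl_isr_stats(const char *path /* .../interrupt */)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, "0", 1);	/* parsed with sscanf(buf, "%x", ...) */
		close(fd);
	}
}
#endif
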
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	/* any well-formed decimal triggers a full dump; csr itself is unused */
	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret;

	ret = pos = iwl_pcie_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();

	return count;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_pcie_txq_disable,
	.txq_enable = iwl_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};

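/*
 * Illustrative only: callers never invoke these PCIe handlers directly;
 * the transport core dispatches through the ops vtable above. A minimal
 * sketch of such an inline dispatch wrapper (signature assumed for
 * illustration, not copied from iwl-trans.h):
 */
#if 0
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
				     struct iwl_host_cmd *cmd)
{
	return trans->ops->send_cmd(trans, cmd); /* -> iwl_pcie_send_cmd */
}
#endif
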
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (!trans)
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	return trans;

out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}
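
/*
 * Illustrative only: a condensed sketch of how a PCI probe path would
 * consume this allocator. Everything here except iwl_trans_pcie_alloc()
 * itself (example_probe, the driver_data cast) is an assumption for
 * illustration, not part of this file:
 */
#if 0
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	const struct iwl_cfg *cfg = (const struct iwl_cfg *)ent->driver_data;
	struct iwl_trans *trans = iwl_trans_pcie_alloc(pdev, ent, cfg);

	if (!trans)
		return -ENOMEM;	/* the allocator cleans up after itself */

	pci_set_drvdata(pdev, trans);
	return 0;
}
#endif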