iwlwifi: mvm: disable beacon filtering escape timer
[deliverable/linux.git] / drivers/net/wireless/iwlwifi/pcie/trans.c
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/pci.h>
66#include <linux/pci-aspm.h>
67#include <linux/interrupt.h>
68#include <linux/debugfs.h>
69#include <linux/sched.h>
70#include <linux/bitops.h>
71#include <linux/gfp.h>
72#include <linux/vmalloc.h>
73
74#include "iwl-drv.h"
75#include "iwl-trans.h"
76#include "iwl-csr.h"
77#include "iwl-prph.h"
78#include "iwl-agn-hw.h"
79#include "iwl-fw-error-dump.h"
80#include "internal.h"
81
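/*
 * Free the firmware debug monitor buffer: unmap the DMA mapping, release
 * the pages and clear the bookkeeping in trans_pcie. Safe to call when no
 * buffer was ever allocated.
 */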
82static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
83{
84 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
85
86 if (!trans_pcie->fw_mon_page)
87 return;
88
89 dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
90 trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
91 __free_pages(trans_pcie->fw_mon_page,
92 get_order(trans_pcie->fw_mon_size));
93 trans_pcie->fw_mon_page = NULL;
94 trans_pcie->fw_mon_phys = 0;
95 trans_pcie->fw_mon_size = 0;
96}
97
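/*
 * Allocate the buffer the firmware uses as its debug monitor. If one
 * already exists it is only re-synced for the device; otherwise try
 * progressively smaller power-of-two sizes (64 MB down to 2 KB) until
 * both the page allocation and the DMA mapping succeed.
 */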
98static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
99{
100 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
101 struct page *page;
102 dma_addr_t phys;
103 u32 size;
104 u8 power;
105
106 if (trans_pcie->fw_mon_page) {
107 dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
108 trans_pcie->fw_mon_size,
109 DMA_FROM_DEVICE);
110 return;
111 }
112
113 phys = 0;
114 for (power = 26; power >= 11; power--) {
115 int order;
116
117 size = BIT(power);
118 order = get_order(size);
119 page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
120 order);
121 if (!page)
122 continue;
123
124 phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
125 DMA_FROM_DEVICE);
126 if (dma_mapping_error(trans->dev, phys)) {
127 __free_pages(page, order);
128 continue;
129 }
130 IWL_INFO(trans,
131 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
132 size, order);
133 break;
134 }
135
136 if (WARN_ON_ONCE(!page))
137 return;
138
139 trans_pcie->fw_mon_page = page;
140 trans_pcie->fw_mon_phys = phys;
141 trans_pcie->fw_mon_size = size;
142}
143
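/*
 * Indirect access to the shared (SHR) register space: the target address
 * and an opcode are written to HEEP_CTRL_WRD_PCIEX_CTRL_REG, and the data
 * is transferred through HEEP_CTRL_WRD_PCIEX_DATA_REG.
 */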
144static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
145{
146 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
147 ((reg & 0x0000ffff) | (2 << 28)));
148 return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
149}
150
151static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
152{
153 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
154 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
155 ((reg & 0x0000ffff) | (3 << 28)));
156}
157
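/*
 * Select the APMG power source: VAUX if it was requested and the PCI
 * device can signal PME from D3cold, VMAIN otherwise.
 */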
158static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
159{
160 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
161 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
162 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
163 ~APMG_PS_CTRL_MSK_PWR_SRC);
164 else
165 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
166 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
167 ~APMG_PS_CTRL_MSK_PWR_SRC);
168}
169
170/* PCI registers */
171#define PCI_CFG_RETRY_TIMEOUT 0x041
172
173static void iwl_pcie_apm_config(struct iwl_trans *trans)
174{
175 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
176 u16 lctl;
177 u16 cap;
178
179 /*
180 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
181 * Check if BIOS (or OS) enabled L1-ASPM on this device.
182 * If so (likely), disable L0S, so the device moves directly L0->L1;
183 * this costs only a negligible amount of power savings.
184 * If not (unlikely), enable L0S, so there is at least some
185 * power savings, even without L1.
186 */
187 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
188 if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
189 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
190 else
191 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
192 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
193
194 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
195 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
196 dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
197 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
198 trans->ltr_enabled ? "En" : "Dis");
199}
200
201/*
202 * Start up NIC's basic functionality after it has been reset
7afe3705 203 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
204 * NOTE: This does not load uCode nor start the embedded processor
205 */
7afe3705 206static int iwl_pcie_apm_init(struct iwl_trans *trans)
207{
208 int ret = 0;
209 IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
210
211 /*
212 * Use "set_bit" below rather than "write", to preserve any hardware
213 * bits already set by default after reset.
214 */
215
216 /* Disable L0S exit timer (platform NMI Work/Around) */
217 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
218 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
219 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
220
221 /*
222 * Disable L0s without affecting L1;
223 * don't wait for ICH L0s (ICH bug W/A)
224 */
225 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
20d3b647 226 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
227
228 /* Set FH wait threshold to maximum (HW error during stress W/A) */
229 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
230
231 /*
232 * Enable HAP INTA (interrupt from management bus) to
233 * wake device's PCI Express link L1a -> L0s
234 */
235 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
20d3b647 236 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
a6c684ee 237
7afe3705 238 iwl_pcie_apm_config(trans);
239
240 /* Configure analog phase-lock-loop before activating to D0A */
035f7ff2 241 if (trans->cfg->base_params->pll_cfg_val)
a6c684ee 242 iwl_set_bit(trans, CSR_ANA_PLL_CFG,
035f7ff2 243 trans->cfg->base_params->pll_cfg_val);
244
245 /*
246 * Set "initialization complete" bit to move adapter from
247 * D0U* --> D0A* (powered-up active) state.
248 */
249 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
250
251 /*
252 * Wait for clock stabilization; once stabilized, access to
253 * device-internal resources is supported, e.g. iwl_write_prph()
254 * and accesses to uCode SRAM.
255 */
256 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
257 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
258 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
259 if (ret < 0) {
260 IWL_DEBUG_INFO(trans, "Failed to init the card\n");
261 goto out;
262 }
263
264 if (trans->cfg->host_interrupt_operation_mode) {
265 /*
266 * This is a bit of an abuse - the workaround below is needed for
267 * 7260 / 3160 only, so we key it off host_interrupt_operation_mode
268 * even though it is not directly related to that flag.
269 *
270 * Enable the oscillator to count wake up time for L1 exit. This
271 * consumes slightly more power (100uA) - but allows to be sure
272 * that we wake up from L1 on time.
273 *
274 * This looks weird: read twice the same register, discard the
275 * value, set a bit, and yet again, read that same register
276 * just to discard the value. But that's the way the hardware
277 * seems to like it.
278 */
279 iwl_read_prph(trans, OSC_CLK);
280 iwl_read_prph(trans, OSC_CLK);
281 iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
282 iwl_read_prph(trans, OSC_CLK);
283 iwl_read_prph(trans, OSC_CLK);
284 }
285
286 /*
287 * Enable DMA clock and wait for it to stabilize.
288 *
289 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
290 * bits do not disable clocks. This preserves any hardware
291 * bits already set by default in "CLK_CTRL_REG" after reset.
a6c684ee 292 */
293 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
294 iwl_write_prph(trans, APMG_CLK_EN_REG,
295 APMG_CLK_VAL_DMA_CLK_RQT);
296 udelay(20);
297
298 /* Disable L1-Active */
299 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
300 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
301
302 /* Clear the interrupt in APMG if the NIC is in RFKILL */
303 iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
304 APMG_RTC_INT_STT_RFKILL);
305 }
889b1696 306
eb7ff77e 307 set_bit(STATUS_DEVICE_ENABLED, &trans->status);
308
309out:
310 return ret;
311}
312
313/*
314 * Enable LP XTAL to avoid HW bug where device may consume much power if
315 * FW is not loaded after device reset. LP XTAL is disabled by default
316 * after device HW reset. Do it only if XTAL is fed by internal source.
317 * Configure device's "persistence" mode to avoid resetting XTAL again when
318 * SHRD_HW_RST occurs in S3.
319 */
320static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
321{
322 int ret;
323 u32 apmg_gp1_reg;
324 u32 apmg_xtal_cfg_reg;
325 u32 dl_cfg_reg;
326
327 /* Force XTAL ON */
328 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
329 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
330
331 /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
332 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
333
334 udelay(10);
335
336 /*
337 * Set "initialization complete" bit to move adapter from
338 * D0U* --> D0A* (powered-up active) state.
339 */
340 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
341
342 /*
343 * Wait for clock stabilization; once stabilized, access to
344 * device-internal resources is possible.
345 */
346 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
347 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
348 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
349 25000);
350 if (WARN_ON(ret < 0)) {
351 IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
352 /* Release XTAL ON request */
353 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
354 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
355 return;
356 }
357
358 /*
359 * Clear "disable persistence" to avoid LP XTAL resetting when
360 * SHRD_HW_RST is applied in S3.
361 */
362 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
363 APMG_PCIDEV_STT_VAL_PERSIST_DIS);
364
365 /*
366 * Force APMG XTAL to be active to prevent its disabling by HW
367 * caused by APMG idle state.
368 */
369 apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
370 SHR_APMG_XTAL_CFG_REG);
371 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
372 apmg_xtal_cfg_reg |
373 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
374
375 /*
376 * Reset entire device again - do controller reset (results in
377 * SHRD_HW_RST). Turn MAC off before proceeding.
378 */
379 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
380
381 udelay(10);
382
383 /* Enable LP XTAL by indirect access through CSR */
384 apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
385 iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
386 SHR_APMG_GP1_WF_XTAL_LP_EN |
387 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
388
389 /* Clear delay line clock power up */
390 dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
391 iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
392 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
393
394 /*
395 * Enable persistence mode to avoid LP XTAL resetting when
396 * SHRD_HW_RST is applied in S3.
397 */
398 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
399 CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
400
401 /*
402 * Clear "initialization complete" bit to move adapter from
403 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
404 */
405 iwl_clear_bit(trans, CSR_GP_CNTRL,
406 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
407
408 /* Activates XTAL resources monitor */
409 __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
410 CSR_MONITOR_XTAL_RESOURCES);
411
412 /* Release XTAL ON request */
413 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
414 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
415 udelay(10);
416
417 /* Release APMG XTAL */
418 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
419 apmg_xtal_cfg_reg &
420 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
421}
422
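/*
 * Request that the device stop its bus-master DMA activity and poll
 * CSR_RESET until the master-disabled bit is set (warn after 100 usec).
 */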
7afe3705 423static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
424{
425 int ret = 0;
426
427 /* stop device's busmaster DMA activity */
428 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
429
430 ret = iwl_poll_bit(trans, CSR_RESET,
431 CSR_RESET_REG_FLAG_MASTER_DISABLED,
432 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
7f2ac8fb 433 if (ret < 0)
434 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
435
436 IWL_DEBUG_INFO(trans, "stop master\n");
437
438 return ret;
439}
440
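/*
 * Stop the APM and put the card in a low power state: stop bus-master DMA,
 * then either apply the LP XTAL workaround or reset the device and clear
 * the "initialization complete" bit (D0A* --> D0U*).
 */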
7afe3705 441static void iwl_pcie_apm_stop(struct iwl_trans *trans)
442{
443 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
444
eb7ff77e 445 clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
446
447 /* Stop device's DMA activity */
7afe3705 448 iwl_pcie_apm_stop_master(trans);
cc56feb2 449
450 if (trans->cfg->lp_xtal_workaround) {
451 iwl_pcie_apm_lp_xtal_enable(trans);
452 return;
453 }
454
455 /* Reset the entire device */
456 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
457
458 udelay(10);
459
460 /*
461 * Clear "initialization complete" bit to move adapter from
462 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
463 */
464 iwl_clear_bit(trans, CSR_GP_CNTRL,
465 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
466}
467
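/*
 * Basic NIC bring-up: init the APM, select the power source (pre-8000
 * families), let the op_mode configure the NIC, then allocate or reset
 * the RX, TX and command queues and optionally enable shadow registers.
 */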
7afe3705 468static int iwl_pcie_nic_init(struct iwl_trans *trans)
392f8b78 469{
7b11488f 470 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
471
472 /* nic_init */
7b70bd63 473 spin_lock(&trans_pcie->irq_lock);
7afe3705 474 iwl_pcie_apm_init(trans);
392f8b78 475
7b70bd63 476 spin_unlock(&trans_pcie->irq_lock);
392f8b78 477
478 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
479 iwl_pcie_set_pwr(trans, false);
392f8b78 480
ecdb975c 481 iwl_op_mode_nic_config(trans->op_mode);
482
483 /* Allocate the RX queue, or reset if it is already allocated */
9805c446 484 iwl_pcie_rx_init(trans);
485
486 /* Allocate or reset and init all Tx and Command queues */
f02831be 487 if (iwl_pcie_tx_init(trans))
488 return -ENOMEM;
489
035f7ff2 490 if (trans->cfg->base_params->shadow_reg_enable) {
392f8b78 491 /* enable shadow regs in HW */
20d3b647 492 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
d38069d1 493 IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
494 }
495
496 return 0;
497}
498
499#define HW_READY_TIMEOUT (50)
500
501/* Note: returns poll_bit return value, which is >= 0 if success */
7afe3705 502static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
503{
504 int ret;
505
1042db2a 506 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
20d3b647 507 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
508
509 /* See if we got it */
1042db2a 510 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
511 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
512 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
513 HW_READY_TIMEOUT);
392f8b78 514
515 if (ret >= 0)
516 iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
517
6d8f6eeb 518 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
519 return ret;
520}
521
522/* Note: returns standard 0/-ERROR code */
7afe3705 523static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
524{
525 int ret;
289e5501 526 int t = 0;
501fd989 527 int iter;
392f8b78 528
6d8f6eeb 529 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
392f8b78 530
7afe3705 531 ret = iwl_pcie_set_hw_ready(trans);
ebb7678d 532 /* If the card is ready, exit 0 */
533 if (ret >= 0)
534 return 0;
535
536 for (iter = 0; iter < 10; iter++) {
537 /* If HW is not ready, prepare the conditions to check again */
538 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
539 CSR_HW_IF_CONFIG_REG_PREPARE);
540
541 do {
542 ret = iwl_pcie_set_hw_ready(trans);
543 if (ret >= 0)
544 return 0;
392f8b78 545
546 usleep_range(200, 1000);
547 t += 200;
548 } while (t < 150000);
549 msleep(25);
550 }
392f8b78 551
7f2ac8fb 552 IWL_ERR(trans, "Couldn't prepare the card\n");
392f8b78 553
554 return ret;
555}
556
557/*
558 * ucode
559 */
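/*
 * DMA a single firmware chunk into device SRAM over the service channel
 * (FH_SRVC_CHNL) and wait up to five seconds for the "ucode write
 * complete" interrupt.
 */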
7afe3705 560static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
83f84d7b 561 dma_addr_t phy_addr, u32 byte_cnt)
cf614297 562{
13df1aab 563 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
564 int ret;
565
13df1aab 566 trans_pcie->ucode_write_complete = false;
567
568 iwl_write_direct32(trans,
569 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
570 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
571
572 iwl_write_direct32(trans,
573 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
574 dst_addr);
575
576 iwl_write_direct32(trans,
577 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
578 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
579
580 iwl_write_direct32(trans,
581 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
582 (iwl_get_dma_hi_addr(phy_addr)
583 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
584
585 iwl_write_direct32(trans,
586 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
587 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
588 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
589 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
590
591 iwl_write_direct32(trans,
592 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
593 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
594 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
595 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
cf614297 596
597 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
598 trans_pcie->ucode_write_complete, 5 * HZ);
cf614297 599 if (!ret) {
83f84d7b 600 IWL_ERR(trans, "Failed to load firmware chunk!\n");
601 return -ETIMEDOUT;
602 }
603
604 return 0;
605}
606
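/*
 * Copy one firmware section to the device, preferably through a single
 * DMA-coherent bounce buffer of the full section size; if that allocation
 * fails, fall back to PAGE_SIZE chunks.
 */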
7afe3705 607static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
83f84d7b 608 const struct fw_desc *section)
cf614297 609{
610 u8 *v_addr;
611 dma_addr_t p_addr;
c571573a 612 u32 offset, chunk_sz = section->len;
cf614297
EG
613 int ret = 0;
614
83f84d7b
JB
615 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
616 section_num);
617
c571573a
EG
618 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
619 GFP_KERNEL | __GFP_NOWARN);
620 if (!v_addr) {
621 IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
622 chunk_sz = PAGE_SIZE;
623 v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
624 &p_addr, GFP_KERNEL);
625 if (!v_addr)
626 return -ENOMEM;
627 }
83f84d7b 628
c571573a 629 for (offset = 0; offset < section->len; offset += chunk_sz) {
83f84d7b
JB
630 u32 copy_size;
631
c571573a 632 copy_size = min_t(u32, chunk_sz, section->len - offset);
cf614297 633
83f84d7b 634 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
7afe3705
EG
635 ret = iwl_pcie_load_firmware_chunk(trans,
636 section->offset + offset,
637 p_addr, copy_size);
83f84d7b
JB
638 if (ret) {
639 IWL_ERR(trans,
640 "Could not load the [%d] uCode section\n",
641 section_num);
642 break;
6dfa8d01 643 }
83f84d7b
JB
644 }
645
c571573a 646 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
83f84d7b
JB
647 return ret;
648}
649
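/*
 * Load the secured firmware sections that belong to one CPU (1 or 2),
 * starting at *first_ucode_section and stopping at an empty section or
 * the CPU1/CPU2 separator, then report loading complete to the LMPM.
 */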
650static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
651 const struct fw_img *image,
034846cf
EH
652 int cpu,
653 int *first_ucode_section)
e2d6f4e7
EH
654{
655 int shift_param;
189fa2fa 656 int i, ret = 0;
034846cf 657 u32 last_read_idx = 0;
e2d6f4e7
EH
658
659 if (cpu == 1) {
660 shift_param = 0;
034846cf 661 *first_ucode_section = 0;
e2d6f4e7
EH
662 } else {
663 shift_param = 16;
034846cf 664 (*first_ucode_section)++;
e2d6f4e7
EH
665 }
666
034846cf
EH
667 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
668 last_read_idx = i;
669
670 if (!image->sec[i].data ||
671 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
672 IWL_DEBUG_FW(trans,
673 "Break since Data not valid or Empty section, sec = %d\n",
674 i);
189fa2fa 675 break;
034846cf
EH
676 }
677
678 if (i == (*first_ucode_section) + 1)
189fa2fa
EH
679 /* set CPU to started */
680 iwl_set_bits_prph(trans,
681 CSR_UCODE_LOAD_STATUS_ADDR,
682 LMPM_CPU_HDRS_LOADING_COMPLETED
683 << shift_param);
e2d6f4e7 684
189fa2fa
EH
685 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
686 if (ret)
687 return ret;
e2d6f4e7 688 }
189fa2fa
EH
689 /* image loading complete */
690 iwl_set_bits_prph(trans,
691 CSR_UCODE_LOAD_STATUS_ADDR,
692 LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
e2d6f4e7 693
034846cf
EH
694 *first_ucode_section = last_read_idx;
695
189fa2fa
EH
696 return 0;
697}
e2d6f4e7 698
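/*
 * Load the (non-secured) firmware sections that belong to one CPU,
 * stopping at an empty section or the CPU1/CPU2 separator. On family 8000
 * the LMPM load-status bits are updated once the sections are written.
 */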
699static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
700 const struct fw_img *image,
034846cf
EH
701 int cpu,
702 int *first_ucode_section)
189fa2fa
EH
703{
704 int shift_param;
189fa2fa 705 int i, ret = 0;
034846cf 706 u32 last_read_idx = 0;
189fa2fa
EH
707
708 if (cpu == 1) {
709 shift_param = 0;
034846cf 710 *first_ucode_section = 0;
189fa2fa
EH
711 } else {
712 shift_param = 16;
034846cf 713 (*first_ucode_section)++;
189fa2fa
EH
714 }
715
034846cf
EH
716 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
717 last_read_idx = i;
718
719 if (!image->sec[i].data ||
720 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
721 IWL_DEBUG_FW(trans,
722 "Break since Data not valid or Empty section, sec = %d\n",
723 i);
189fa2fa 724 break;
034846cf
EH
725 }
726
189fa2fa
EH
727 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
728 if (ret)
729 return ret;
e2d6f4e7
EH
730 }
731
189fa2fa
EH
732 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
733 iwl_set_bits_prph(trans,
734 CSR_UCODE_LOAD_STATUS_ADDR,
735 (LMPM_CPU_UCODE_LOADING_COMPLETED |
736 LMPM_CPU_HDRS_LOADING_COMPLETED |
737 LMPM_CPU_UCODE_LOADING_STARTED) <<
738 shift_param);
739
034846cf
EH
740 *first_ucode_section = last_read_idx;
741
742 return 0;
743}
744
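/*
 * Load a complete firmware image: set up the secure-boot inspector and CPU
 * header addresses when secure boot is used, write the CPU1 (and, for
 * dual-CPU images, CPU2) sections, optionally program the firmware
 * monitor, release the CPU reset and wait for secure-boot verification.
 */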
7afe3705 745static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
0692fe41 746 const struct fw_img *image)
cf614297 747{
c2d20201 748 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
189fa2fa 749 int ret = 0;
034846cf 750 int first_ucode_section;
cf614297 751
e2d6f4e7
EH
752 IWL_DEBUG_FW(trans,
753 "working with %s CPU\n",
754 image->is_dual_cpus ? "Dual" : "Single");
755
756 /* configure the ucode to be ready to get the secured image */
c7583d7d 757 if (iwl_has_secure_boot(trans->hw_rev, trans->cfg->device_family)) {
e2d6f4e7 758 /* set secure boot inspector addresses */
189fa2fa
EH
759 iwl_write_prph(trans,
760 LMPM_SECURE_INSPECTOR_CODE_ADDR,
761 LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
e2d6f4e7 762
189fa2fa
EH
763 iwl_write_prph(trans,
764 LMPM_SECURE_INSPECTOR_DATA_ADDR,
765 LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
e2d6f4e7 766
189fa2fa
EH
767 /* set CPU1 header address */
768 iwl_write_prph(trans,
769 LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
770 LMPM_SECURE_CPU1_HDR_MEM_SPACE);
771
772 /* load to FW the binary Secured sections of CPU1 */
034846cf
EH
773 ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
774 &first_ucode_section);
2d1c0044
JB
775 if (ret)
776 return ret;
cf614297 777
189fa2fa
EH
778 } else {
779 /* load to FW the binary Non secured sections of CPU1 */
034846cf
EH
780 ret = iwl_pcie_load_cpu_sections(trans, image, 1,
781 &first_ucode_section);
e2d6f4e7
EH
782 if (ret)
783 return ret;
e2d6f4e7
EH
784 }
785
786 if (image->is_dual_cpus) {
189fa2fa
EH
787 /* set CPU2 header address */
788 iwl_write_prph(trans,
789 LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
790 LMPM_SECURE_CPU2_HDR_MEM_SPACE);
e2d6f4e7 791
189fa2fa 792 /* load to FW the binary sections of CPU2 */
c7583d7d
EH
793 if (iwl_has_secure_boot(trans->hw_rev,
794 trans->cfg->device_family))
034846cf
EH
795 ret = iwl_pcie_load_cpu_secured_sections(
796 trans, image, 2,
797 &first_ucode_section);
189fa2fa 798 else
034846cf
EH
799 ret = iwl_pcie_load_cpu_sections(trans, image, 2,
800 &first_ucode_section);
189fa2fa
EH
801 if (ret)
802 return ret;
e2d6f4e7 803 }
cf614297 804
c2d20201
EG
805 /* supported for 7000 only for the moment */
806 if (iwlwifi_mod_params.fw_monitor &&
807 trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
808 iwl_pcie_alloc_fw_monitor(trans);
809
810 if (trans_pcie->fw_mon_size) {
811 iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
812 trans_pcie->fw_mon_phys >> 4);
813 iwl_write_prph(trans, MON_BUFF_END_ADDR,
814 (trans_pcie->fw_mon_phys +
815 trans_pcie->fw_mon_size) >> 4);
816 }
817 }
818
e12ba844
EH
819 /* release CPU reset */
820 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
821 iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
822 else
823 iwl_write32(trans, CSR_RESET, 0);
824
c7583d7d 825 if (iwl_has_secure_boot(trans->hw_rev, trans->cfg->device_family)) {
189fa2fa
EH
826 /* wait for image verification to complete */
827 ret = iwl_poll_prph_bit(trans,
828 LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
829 LMPM_SECURE_BOOT_STATUS_SUCCESS,
830 LMPM_SECURE_BOOT_STATUS_SUCCESS,
831 LMPM_SECURE_TIME_OUT);
832
833 if (ret < 0) {
834 IWL_ERR(trans, "Time out on secure boot process\n");
835 return ret;
836 }
837 }
838
cf614297
EG
839 return 0;
840}
841
0692fe41 842static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
6ae02f3e 843 const struct fw_img *fw, bool run_in_rfkill)
392f8b78
EG
844{
845 int ret;
c9eec95c 846 bool hw_rfkill;
392f8b78 847
496bab39 848 /* This may fail if AMT took ownership of the device */
7afe3705 849 if (iwl_pcie_prepare_card_hw(trans)) {
6d8f6eeb 850 IWL_WARN(trans, "Exit HW not ready\n");
392f8b78
EG
851 return -EIO;
852 }
853
8c46bb70
EG
854 iwl_enable_rfkill_int(trans);
855
392f8b78 856 /* If platform's RF_KILL switch is NOT set to KILL */
8d425517 857 hw_rfkill = iwl_is_rfkill_set(trans);
4620020b 858 if (hw_rfkill)
eb7ff77e 859 set_bit(STATUS_RFKILL, &trans->status);
4620020b 860 else
eb7ff77e 861 clear_bit(STATUS_RFKILL, &trans->status);
14cfca71 862 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
6ae02f3e 863 if (hw_rfkill && !run_in_rfkill)
392f8b78 864 return -ERFKILL;
392f8b78 865
1042db2a 866 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
392f8b78 867
7afe3705 868 ret = iwl_pcie_nic_init(trans);
392f8b78 869 if (ret) {
6d8f6eeb 870 IWL_ERR(trans, "Unable to init nic\n");
392f8b78
EG
871 return ret;
872 }
873
874 /* make sure rfkill handshake bits are cleared */
1042db2a
EG
875 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
876 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
392f8b78
EG
877 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
878
879 /* clear (again), then enable host interrupts */
1042db2a 880 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
6d8f6eeb 881 iwl_enable_interrupts(trans);
392f8b78
EG
882
883 /* really make sure rfkill handshake bits are cleared */
1042db2a
EG
884 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
885 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
392f8b78 886
cf614297 887 /* Load the given image to the HW */
7afe3705 888 return iwl_pcie_load_given_ucode(trans, fw);
b3c2ce13
EG
889}
890
adca1235 891static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
ed6a3803 892{
990aa6d7 893 iwl_pcie_reset_ict(trans);
f02831be 894 iwl_pcie_tx_start(trans, scd_addr);
895}
896
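/*
 * Full device stop: disable interrupts and ICT, stop the TX/RX paths and
 * the APM if the device was enabled, reset the on-board processor, clear
 * the status bits and re-enable only the RF-kill interrupt, notifying the
 * op_mode if the RF-kill state changed in the meantime.
 */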
43e58856 897static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
ae2c30bf 898{
43e58856 899 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3dc3374f
EG
900 bool hw_rfkill, was_hw_rfkill;
901
902 was_hw_rfkill = iwl_is_rfkill_set(trans);
ae2c30bf 903
43e58856 904 /* tell the device to stop sending interrupts */
7b70bd63 905 spin_lock(&trans_pcie->irq_lock);
ae2c30bf 906 iwl_disable_interrupts(trans);
7b70bd63 907 spin_unlock(&trans_pcie->irq_lock);
ae2c30bf 908
ab6cf8e8 909 /* device going down, Stop using ICT table */
990aa6d7 910 iwl_pcie_disable_ict(trans);
ab6cf8e8
EG
911
912 /*
913 * If a HW restart happens during firmware loading,
914 * then the firmware loading might call this function
915 * and later it might be called again due to the
916 * restart. So don't process again if the device is
917 * already dead.
918 */
31b8b343
EG
919 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
920 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
f02831be 921 iwl_pcie_tx_stop(trans);
9805c446 922 iwl_pcie_rx_stop(trans);
6379103e 923
ab6cf8e8 924 /* Power-down device's busmaster DMA clocks */
1042db2a 925 iwl_write_prph(trans, APMG_CLK_DIS_REG,
ab6cf8e8
EG
926 APMG_CLK_VAL_DMA_CLK_RQT);
927 udelay(5);
928 }
929
930 /* Make sure (redundant) we've released our request to stay awake */
1042db2a 931 iwl_clear_bit(trans, CSR_GP_CNTRL,
20d3b647 932 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ab6cf8e8
EG
933
934 /* Stop the device, and put it in low power state */
7afe3705 935 iwl_pcie_apm_stop(trans);
43e58856
EG
936
937 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
938 * Clean again the interrupt here
939 */
7b70bd63 940 spin_lock(&trans_pcie->irq_lock);
43e58856 941 iwl_disable_interrupts(trans);
7b70bd63 942 spin_unlock(&trans_pcie->irq_lock);
43e58856 943
43e58856 944 /* stop and reset the on-board processor */
522713c8
EG
945 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
946 udelay(20);
74fda971
DF
947
948 /* clear all status bits */
eb7ff77e
AN
949 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
950 clear_bit(STATUS_INT_ENABLED, &trans->status);
eb7ff77e
AN
951 clear_bit(STATUS_TPOWER_PMI, &trans->status);
952 clear_bit(STATUS_RFKILL, &trans->status);
a4082843
AN
953
954 /*
955 * Even if we stop the HW, we still want the RF kill
956 * interrupt
957 */
958 iwl_enable_rfkill_int(trans);
959
960 /*
961 * Check again since the RF kill state may have changed while
962 * all the interrupts were disabled, in this case we couldn't
963 * receive the RF kill interrupt and update the state in the
964 * op_mode.
965 * Don't call the op_mode if the rfkill state hasn't changed.
966 * This allows the op_mode to call stop_device from the rfkill
967 * notification without endless recursion. Under very rare
968 * circumstances, we might have a small recursion if the rfkill
969 * state changed exactly now while we were called from stop_device.
970 * This is very unlikely but can happen and is supported.
a4082843
AN
971 */
972 hw_rfkill = iwl_is_rfkill_set(trans);
973 if (hw_rfkill)
eb7ff77e 974 set_bit(STATUS_RFKILL, &trans->status);
a4082843 975 else
eb7ff77e 976 clear_bit(STATUS_RFKILL, &trans->status);
3dc3374f 977 if (hw_rfkill != was_hw_rfkill)
14cfca71
JB
978 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
979}
980
981void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
982{
983 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
984 iwl_trans_pcie_stop_device(trans);
985}
986
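/*
 * Prepare for D3 (WoWLAN) suspend: disable interrupts and, unless running
 * in test mode, disable ICT, drop the MAC access and init-done requests,
 * reset the TX queues and switch the device to VAUX power.
 */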
debff618 987static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
2dd4f9f7 988{
2dd4f9f7 989 iwl_disable_interrupts(trans);
debff618
JB
990
991 /*
992 * in testing mode, the host stays awake and the
993 * hardware won't be reset (not even partially)
994 */
995 if (test)
996 return;
997
ddaf5a5b
JB
998 iwl_pcie_disable_ict(trans);
999
2dd4f9f7
JB
1000 iwl_clear_bit(trans, CSR_GP_CNTRL,
1001 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ddaf5a5b
JB
1002 iwl_clear_bit(trans, CSR_GP_CNTRL,
1003 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1004
1005 /*
1006 * reset TX queues -- some of their registers reset during S3
1007 * so if we don't reset everything here the D3 image would try
1008 * to execute some invalid memory upon resume
1009 */
1010 iwl_trans_pcie_tx_reset(trans);
1011
1012 iwl_pcie_set_pwr(trans, true);
1013}
1014
1015static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
debff618
JB
1016 enum iwl_d3_status *status,
1017 bool test)
ddaf5a5b
JB
1018{
1019 u32 val;
1020 int ret;
1021
debff618
JB
1022 if (test) {
1023 iwl_enable_interrupts(trans);
1024 *status = IWL_D3_STATUS_ALIVE;
1025 return 0;
1026 }
1027
ddaf5a5b
JB
1028 /*
1029 * Also enables interrupts - none will happen as the device doesn't
1030 * know we're waking it up, only when the opmode actually tells it
1031 * after this call.
1032 */
1033 iwl_pcie_reset_ict(trans);
1034
1035 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1036 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1037
01e58a28
EG
1038 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1039 udelay(2);
1040
ddaf5a5b
JB
1041 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1042 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1043 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1044 25000);
7f2ac8fb 1045 if (ret < 0) {
ddaf5a5b
JB
1046 IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
1047 return ret;
1048 }
1049
a3ead656
EG
1050 iwl_pcie_set_pwr(trans, false);
1051
ddaf5a5b
JB
1052 iwl_trans_pcie_tx_reset(trans);
1053
1054 ret = iwl_pcie_rx_init(trans);
1055 if (ret) {
1056 IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
1057 return ret;
1058 }
1059
a3ead656
EG
1060 val = iwl_read32(trans, CSR_RESET);
1061 if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
1062 *status = IWL_D3_STATUS_RESET;
1063 else
1064 *status = IWL_D3_STATUS_ALIVE;
1065
ddaf5a5b 1066 return 0;
1067}
1068
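/*
 * Bring the hardware up from scratch: prepare the card, issue a software
 * reset, initialize the APM and start reporting RF-kill state changes to
 * the op_mode.
 */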
57a1dc89 1069static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
e6bb4c9c 1070{
c9eec95c 1071 bool hw_rfkill;
a8b691e6 1072 int err;
e6bb4c9c 1073
7afe3705 1074 err = iwl_pcie_prepare_card_hw(trans);
ebb7678d 1075 if (err) {
d6f1c316 1076 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
a8b691e6 1077 return err;
ebb7678d 1078 }
a6c684ee 1079
2997494f 1080 /* Reset the entire device */
ce836c76 1081 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
2997494f
EG
1082
1083 usleep_range(10, 15);
1084
7afe3705 1085 iwl_pcie_apm_init(trans);
a6c684ee 1086
226c02ca
EG
1087 /* From now on, the op_mode will be kept updated about RF kill state */
1088 iwl_enable_rfkill_int(trans);
1089
8d425517 1090 hw_rfkill = iwl_is_rfkill_set(trans);
4620020b 1091 if (hw_rfkill)
eb7ff77e 1092 set_bit(STATUS_RFKILL, &trans->status);
4620020b 1093 else
eb7ff77e 1094 clear_bit(STATUS_RFKILL, &trans->status);
14cfca71 1095 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
d48e2074 1096
a8b691e6 1097 return 0;
e6bb4c9c
EG
1098}
1099
a4082843 1100static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
cc56feb2 1101{
20d3b647 1102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
d23f78e6 1103
a4082843 1104 /* disable interrupts - don't enable HW RF kill interrupt */
7b70bd63 1105 spin_lock(&trans_pcie->irq_lock);
ee7d737c 1106 iwl_disable_interrupts(trans);
7b70bd63 1107 spin_unlock(&trans_pcie->irq_lock);
ee7d737c 1108
7afe3705 1109 iwl_pcie_apm_stop(trans);
cc56feb2 1110
7b70bd63 1111 spin_lock(&trans_pcie->irq_lock);
218733cf 1112 iwl_disable_interrupts(trans);
7b70bd63 1113 spin_unlock(&trans_pcie->irq_lock);
1df06bdc 1114
8d96bb61 1115 iwl_pcie_disable_ict(trans);
cc56feb2
EG
1116}
1117
03905495
EG
1118static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1119{
05f5b97e 1120 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
03905495
EG
1121}
1122
1123static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1124{
05f5b97e 1125 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
03905495
EG
1126}
1127
1128static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1129{
05f5b97e 1130 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
03905495
EG
1131}
1132
6a06b6c1
EG
1133static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1134{
f9477c17
AP
1135 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1136 ((reg & 0x000FFFFF) | (3 << 24)));
6a06b6c1
EG
1137 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1138}
1139
1140static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1141 u32 val)
1142{
1143 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
f9477c17 1144 ((addr & 0x000FFFFF) | (3 << 24)));
6a06b6c1
EG
1145 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1146}
1147
f14d6b39
JB
1148static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
1149{
1150 WARN_ON(1);
1151 return 0;
1152}
1153
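/*
 * Cache the configuration provided by the op_mode (command queue and FIFO,
 * no-reclaim commands, RX buffer size, watchdog timeout, etc.) and set up
 * the dummy NAPI context if the op_mode supports NAPI.
 */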
c6f600fc 1154static void iwl_trans_pcie_configure(struct iwl_trans *trans,
9eae88fa 1155 const struct iwl_trans_config *trans_cfg)
c6f600fc
MV
1156{
1157 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1158
1159 trans_pcie->cmd_queue = trans_cfg->cmd_queue;
b04db9ac 1160 trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
d663ee73
JB
1161 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1162 trans_pcie->n_no_reclaim_cmds = 0;
1163 else
1164 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1165 if (trans_pcie->n_no_reclaim_cmds)
1166 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1167 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
9eae88fa 1168
b2cf410c
JB
1169 trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
1170 if (trans_pcie->rx_buf_size_8k)
1171 trans_pcie->rx_page_order = get_order(8 * 1024);
1172 else
1173 trans_pcie->rx_page_order = get_order(4 * 1024);
7c5ba4a8
JB
1174
1175 trans_pcie->wd_timeout =
1176 msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
d9fb6465
JB
1177
1178 trans_pcie->command_names = trans_cfg->command_names;
046db346 1179 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
3a736bcb 1180 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
f14d6b39
JB
1181
1182 /* Initialize NAPI here - it should be before registering to mac80211
1183 * in the opmode but after the HW struct is allocated.
1184 * As this function may be called again in some corner cases don't
1185 * do anything if NAPI was already initialized.
1186 */
1187 if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
1188 init_dummy_netdev(&trans_pcie->napi_dev);
1189 iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
1190 &trans_pcie->napi_dev,
1191 iwl_pcie_dummy_napi_poll, 64);
1192 }
c6f600fc
MV
1193}
1194
d1ff5253 1195void iwl_trans_pcie_free(struct iwl_trans *trans)
34c1b7ba 1196{
20d3b647 1197 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
a42a1844 1198
0aa86df6 1199 synchronize_irq(trans_pcie->pci_dev->irq);
0aa86df6 1200
f02831be 1201 iwl_pcie_tx_free(trans);
9805c446 1202 iwl_pcie_rx_free(trans);
6379103e 1203
a8b691e6
JB
1204 free_irq(trans_pcie->pci_dev->irq, trans);
1205 iwl_pcie_free_ict(trans);
a42a1844
EG
1206
1207 pci_disable_msi(trans_pcie->pci_dev);
05f5b97e 1208 iounmap(trans_pcie->hw_base);
a42a1844
EG
1209 pci_release_regions(trans_pcie->pci_dev);
1210 pci_disable_device(trans_pcie->pci_dev);
59c647b6 1211 kmem_cache_destroy(trans->dev_cmd_pool);
a42a1844 1212
f14d6b39
JB
1213 if (trans_pcie->napi.poll)
1214 netif_napi_del(&trans_pcie->napi);
1215
c2d20201
EG
1216 iwl_pcie_free_fw_monitor(trans);
1217
6d8f6eeb 1218 kfree(trans);
34c1b7ba
EG
1219}
1220
47107e84
DF
1221static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1222{
47107e84 1223 if (state)
eb7ff77e 1224 set_bit(STATUS_TPOWER_PMI, &trans->status);
47107e84 1225 else
eb7ff77e 1226 clear_bit(STATUS_TPOWER_PMI, &trans->status);
47107e84
DF
1227}
1228
e56b04ef
LE
1229static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
1230 unsigned long *flags)
7a65d170
EG
1231{
1232 int ret;
cfb4e624
JB
1233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1234
1235 spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
7a65d170 1236
b9439491
EG
1237 if (trans_pcie->cmd_in_flight)
1238 goto out;
1239
7a65d170 1240 /* this bit wakes up the NIC */
e139dc4a
LE
1241 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1242 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
01e58a28
EG
1243 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1244 udelay(2);
7a65d170
EG
1245
1246 /*
1247 * These bits say the device is running, and should keep running for
1248 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
1249 * but they do not indicate that embedded SRAM is restored yet;
1250 * 3945 and 4965 have volatile SRAM, and must save/restore contents
1251 * to/from host DRAM when sleeping/waking for power-saving.
1252 * Each direction takes approximately 1/4 millisecond; with this
1253 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
1254 * series of register accesses are expected (e.g. reading Event Log),
1255 * to keep device from sleeping.
1256 *
1257 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
1258 * SRAM is okay/restored. We don't check that here because this call
1259 * is just for hardware register access; but GP1 MAC_SLEEP check is a
1260 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
1261 *
1262 * 5000 series and later (including 1000 series) have non-volatile SRAM,
1263 * and do not save/restore SRAM when power cycling.
1264 */
1265 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1266 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1267 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1268 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
1269 if (unlikely(ret < 0)) {
1270 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
1271 if (!silent) {
1272 u32 val = iwl_read32(trans, CSR_GP_CNTRL);
1273 WARN_ONCE(1,
1274 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
1275 val);
cfb4e624 1276 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
7a65d170
EG
1277 return false;
1278 }
1279 }
1280
b9439491 1281out:
e56b04ef
LE
1282 /*
1283 * Fool sparse by faking we release the lock - sparse will
1284 * track nic_access anyway.
1285 */
cfb4e624 1286 __release(&trans_pcie->reg_lock);
7a65d170
EG
1287 return true;
1288}
1289
e56b04ef
LE
1290static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
1291 unsigned long *flags)
7a65d170 1292{
cfb4e624 1293 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
e56b04ef 1294
cfb4e624 1295 lockdep_assert_held(&trans_pcie->reg_lock);
e56b04ef
LE
1296
1297 /*
1298 * Fool sparse by faking we acquire the lock - sparse will
1299 * track nic_access anyway.
1300 */
cfb4e624 1301 __acquire(&trans_pcie->reg_lock);
e56b04ef 1302
b9439491
EG
1303 if (trans_pcie->cmd_in_flight)
1304 goto out;
1305
e139dc4a
LE
1306 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1307 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
7a65d170
EG
1308 /*
1309 * Above we read the CSR_GP_CNTRL register, which will flush
1310 * any previous writes, but we need the write that clears the
1311 * MAC_ACCESS_REQ bit to be performed before any other writes
1312 * scheduled on different CPUs (after we drop reg_lock).
1313 */
1314 mmiowb();
b9439491 1315out:
cfb4e624 1316 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
7a65d170
EG
1317}
1318
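/*
 * Read or write a block of device (target) memory one dword at a time
 * through the HBUS_TARG_MEM_* window, holding NIC access for the duration.
 */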
1319static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
1320 void *buf, int dwords)
1321{
1322 unsigned long flags;
1323 int offs, ret = 0;
1324 u32 *vals = buf;
1325
e56b04ef 1326 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
4fd442db
EG
1327 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
1328 for (offs = 0; offs < dwords; offs++)
1329 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
e56b04ef 1330 iwl_trans_release_nic_access(trans, &flags);
4fd442db
EG
1331 } else {
1332 ret = -EBUSY;
1333 }
4fd442db
EG
1334 return ret;
1335}
1336
1337static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
bf0fd5da 1338 const void *buf, int dwords)
4fd442db
EG
1339{
1340 unsigned long flags;
1341 int offs, ret = 0;
bf0fd5da 1342 const u32 *vals = buf;
4fd442db 1343
e56b04ef 1344 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
4fd442db
EG
1345 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
1346 for (offs = 0; offs < dwords; offs++)
01387ffd
EG
1347 iwl_write32(trans, HBUS_TARG_MEM_WDAT,
1348 vals ? vals[offs] : 0);
e56b04ef 1349 iwl_trans_release_nic_access(trans, &flags);
4fd442db
EG
1350 } else {
1351 ret = -EBUSY;
1352 }
4fd442db
EG
1353 return ret;
1354}
7a65d170 1355
1356#define IWL_FLUSH_WAIT_MS 2000
1357
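/*
 * Wait (up to IWL_FLUSH_WAIT_MS per queue) for the TX queues selected by
 * txq_bm to drain. On timeout, dump scheduler and FH state to help debug
 * the stuck queue.
 */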
3cafdbe6 1358static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
5f178cd2 1359{
8ad71bef 1360 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
990aa6d7 1361 struct iwl_txq *txq;
5f178cd2
EG
1362 struct iwl_queue *q;
1363 int cnt;
1364 unsigned long now = jiffies;
1c3fea82
EG
1365 u32 scd_sram_addr;
1366 u8 buf[16];
5f178cd2
EG
1367 int ret = 0;
1368
1369 /* waiting for all the tx frames complete might take a while */
035f7ff2 1370 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
fa1a91fd
EG
1371 u8 wr_ptr;
1372
9ba1947a 1373 if (cnt == trans_pcie->cmd_queue)
5f178cd2 1374 continue;
3cafdbe6
EG
1375 if (!test_bit(cnt, trans_pcie->queue_used))
1376 continue;
1377 if (!(BIT(cnt) & txq_bm))
1378 continue;
748fa67c
EG
1379
1380 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
8ad71bef 1381 txq = &trans_pcie->txq[cnt];
5f178cd2 1382 q = &txq->q;
fa1a91fd
EG
1383 wr_ptr = ACCESS_ONCE(q->write_ptr);
1384
1385 while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
1386 !time_after(jiffies,
1387 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
1388 u8 write_ptr = ACCESS_ONCE(q->write_ptr);
1389
1390 if (WARN_ONCE(wr_ptr != write_ptr,
1391 "WR pointer moved while flushing %d -> %d\n",
1392 wr_ptr, write_ptr))
1393 return -ETIMEDOUT;
5f178cd2 1394 msleep(1);
fa1a91fd 1395 }
5f178cd2
EG
1396
1397 if (q->read_ptr != q->write_ptr) {
1c3fea82
EG
1398 IWL_ERR(trans,
1399 "fail to flush all tx fifo queues Q %d\n", cnt);
5f178cd2
EG
1400 ret = -ETIMEDOUT;
1401 break;
1402 }
748fa67c 1403 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
5f178cd2 1404 }
1c3fea82
EG
1405
1406 if (!ret)
1407 return 0;
1408
1409 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
1410 txq->q.read_ptr, txq->q.write_ptr);
1411
1412 scd_sram_addr = trans_pcie->scd_base_addr +
1413 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
1414 iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
1415
1416 iwl_print_hex_error(trans, buf, sizeof(buf));
1417
1418 for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
1419 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
1420 iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
1421
1422 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1423 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
1424 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1425 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1426 u32 tbl_dw =
1427 iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
1428 SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
1429
1430 if (cnt & 0x1)
1431 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
1432 else
1433 tbl_dw = tbl_dw & 0x0000FFFF;
1434
1435 IWL_ERR(trans,
1436 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
1437 cnt, active ? "" : "in", fifo, tbl_dw,
83f32a4b
JB
1438 iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
1439 (TFD_QUEUE_SIZE_MAX - 1),
1c3fea82
EG
1440 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
1441 }
1442
5f178cd2
EG
1443 return ret;
1444}
1445
e139dc4a
LE
1446static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
1447 u32 mask, u32 value)
1448{
e56b04ef 1449 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
e139dc4a
LE
1450 unsigned long flags;
1451
e56b04ef 1452 spin_lock_irqsave(&trans_pcie->reg_lock, flags);
e139dc4a 1453 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
e56b04ef 1454 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
e139dc4a
LE
1455}
1456
ff620849
EG
1457static const char *get_csr_string(int cmd)
1458{
d9fb6465 1459#define IWL_CMD(x) case x: return #x
ff620849
EG
1460 switch (cmd) {
1461 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1462 IWL_CMD(CSR_INT_COALESCING);
1463 IWL_CMD(CSR_INT);
1464 IWL_CMD(CSR_INT_MASK);
1465 IWL_CMD(CSR_FH_INT_STATUS);
1466 IWL_CMD(CSR_GPIO_IN);
1467 IWL_CMD(CSR_RESET);
1468 IWL_CMD(CSR_GP_CNTRL);
1469 IWL_CMD(CSR_HW_REV);
1470 IWL_CMD(CSR_EEPROM_REG);
1471 IWL_CMD(CSR_EEPROM_GP);
1472 IWL_CMD(CSR_OTP_GP_REG);
1473 IWL_CMD(CSR_GIO_REG);
1474 IWL_CMD(CSR_GP_UCODE_REG);
1475 IWL_CMD(CSR_GP_DRIVER_REG);
1476 IWL_CMD(CSR_UCODE_DRV_GP1);
1477 IWL_CMD(CSR_UCODE_DRV_GP2);
1478 IWL_CMD(CSR_LED_REG);
1479 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1480 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1481 IWL_CMD(CSR_ANA_PLL_CFG);
1482 IWL_CMD(CSR_HW_REV_WA_REG);
a812cba9 1483 IWL_CMD(CSR_MONITOR_STATUS_REG);
ff620849
EG
1484 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1485 default:
1486 return "UNKNOWN";
1487 }
d9fb6465 1488#undef IWL_CMD
ff620849
EG
1489}
1490
990aa6d7 1491void iwl_pcie_dump_csr(struct iwl_trans *trans)
ff620849
EG
1492{
1493 int i;
1494 static const u32 csr_tbl[] = {
1495 CSR_HW_IF_CONFIG_REG,
1496 CSR_INT_COALESCING,
1497 CSR_INT,
1498 CSR_INT_MASK,
1499 CSR_FH_INT_STATUS,
1500 CSR_GPIO_IN,
1501 CSR_RESET,
1502 CSR_GP_CNTRL,
1503 CSR_HW_REV,
1504 CSR_EEPROM_REG,
1505 CSR_EEPROM_GP,
1506 CSR_OTP_GP_REG,
1507 CSR_GIO_REG,
1508 CSR_GP_UCODE_REG,
1509 CSR_GP_DRIVER_REG,
1510 CSR_UCODE_DRV_GP1,
1511 CSR_UCODE_DRV_GP2,
1512 CSR_LED_REG,
1513 CSR_DRAM_INT_TBL_REG,
1514 CSR_GIO_CHICKEN_BITS,
1515 CSR_ANA_PLL_CFG,
a812cba9 1516 CSR_MONITOR_STATUS_REG,
ff620849
EG
1517 CSR_HW_REV_WA_REG,
1518 CSR_DBG_HPET_MEM_REG
1519 };
1520 IWL_ERR(trans, "CSR values:\n");
1521 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1522 "CSR_INT_PERIODIC_REG)\n");
1523 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1524 IWL_ERR(trans, " %25s: 0X%08x\n",
1525 get_csr_string(csr_tbl[i]),
1042db2a 1526 iwl_read32(trans, csr_tbl[i]));
ff620849
EG
1527 }
1528}
1529
87e5666c
EG
1530#ifdef CONFIG_IWLWIFI_DEBUGFS
1531/* create and remove of files */
1532#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
5a878bf6 1533 if (!debugfs_create_file(#name, mode, parent, trans, \
87e5666c 1534 &iwl_dbgfs_##name##_ops)) \
9da987ac 1535 goto err; \
87e5666c
EG
1536} while (0)
1537
1538/* file operation */
87e5666c 1539#define DEBUGFS_READ_FILE_OPS(name) \
87e5666c
EG
1540static const struct file_operations iwl_dbgfs_##name##_ops = { \
1541 .read = iwl_dbgfs_##name##_read, \
234e3405 1542 .open = simple_open, \
87e5666c
EG
1543 .llseek = generic_file_llseek, \
1544};
1545
16db88ba 1546#define DEBUGFS_WRITE_FILE_OPS(name) \
16db88ba
EG
1547static const struct file_operations iwl_dbgfs_##name##_ops = { \
1548 .write = iwl_dbgfs_##name##_write, \
234e3405 1549 .open = simple_open, \
16db88ba
EG
1550 .llseek = generic_file_llseek, \
1551};
1552
87e5666c 1553#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
87e5666c
EG
1554static const struct file_operations iwl_dbgfs_##name##_ops = { \
1555 .write = iwl_dbgfs_##name##_write, \
1556 .read = iwl_dbgfs_##name##_read, \
234e3405 1557 .open = simple_open, \
87e5666c
EG
1558 .llseek = generic_file_llseek, \
1559};
1560
87e5666c 1561static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
20d3b647
JB
1562 char __user *user_buf,
1563 size_t count, loff_t *ppos)
8ad71bef 1564{
5a878bf6 1565 struct iwl_trans *trans = file->private_data;
8ad71bef 1566 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
990aa6d7 1567 struct iwl_txq *txq;
87e5666c
EG
1568 struct iwl_queue *q;
1569 char *buf;
1570 int pos = 0;
1571 int cnt;
1572 int ret;
1745e440
WYG
1573 size_t bufsz;
1574
035f7ff2 1575 bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
87e5666c 1576
f9e75447 1577 if (!trans_pcie->txq)
87e5666c 1578 return -EAGAIN;
f9e75447 1579
87e5666c
EG
1580 buf = kzalloc(bufsz, GFP_KERNEL);
1581 if (!buf)
1582 return -ENOMEM;
1583
035f7ff2 1584 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
8ad71bef 1585 txq = &trans_pcie->txq[cnt];
87e5666c
EG
1586 q = &txq->q;
1587 pos += scnprintf(buf + pos, bufsz - pos,
f40faf62 1588 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
87e5666c 1589 cnt, q->read_ptr, q->write_ptr,
9eae88fa 1590 !!test_bit(cnt, trans_pcie->queue_used),
f40faf62
AL
1591 !!test_bit(cnt, trans_pcie->queue_stopped),
1592 txq->need_update,
1593 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
87e5666c
EG
1594 }
1595 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1596 kfree(buf);
1597 return ret;
1598}
1599
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
			 rxq->write_actual);
	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
			 rxq->need_update);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

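/* report the accumulated interrupt statistics kept in trans_pcie->isr_stats */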
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

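/* writing the hex value 0 clears the accumulated interrupt statistics */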
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

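/* any successfully parsed write triggers a CSR dump via iwl_pcie_dump_csr();
 * the parsed value itself is not used by that helper */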
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

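/* dump the FH (flow handler / DMA) registers into a buffer allocated by
 * iwl_dump_fh() and hand it to userspace */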
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

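/* sum the lengths of all TBs in a TFD to get the total command length */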
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}

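/* periphery registers captured on firmware error; each entry is an
 * inclusive [start, end] address range */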
static const struct {
	u32 start, end;
} iwl_prph_dump_addr[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c84, .end = 0x00a01c84 },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d98 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ed0 },
	{ .start = 0x00a01f00, .end = 0x00a01f14 },
	{ .start = 0x00a01f44, .end = 0x00a01f58 },
	{ .start = 0x00a01f80, .end = 0x00a01fa8 },
	{ .start = 0x00a01fb0, .end = 0x00a01fbc },
	{ .start = 0x00a01ff8, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};

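/* read every range in iwl_prph_dump_addr and append a PRPH section to the
 * error dump; returns the number of bytes added */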
static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
				    struct iwl_fw_error_dump_data **data)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 prph_len = 0, i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;
		int reg;
		__le32 *val;

		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					   num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		val = (void *)prph->data;

		for (reg = iwl_prph_dump_addr[i].start;
		     reg <= iwl_prph_dump_addr[i].end;
		     reg += 4)
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      reg));
		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);

	return prph_len;
}

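/* append the first IWL_CSR_TO_DUMP bytes of CSR space to the error dump */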
#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}

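/* collect the full transport error dump: host commands from the command
 * queue, CSR and PRPH registers, and (if allocated) the FW monitor buffer */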
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len;
	int i, ptr;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
			num_bytes_in_chunk;
	}

	/* FW monitor */
	if (trans_pcie->fw_mon_page)
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans_pcie->fw_mon_size;

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_prph(trans, &data);
	len += iwl_trans_pcie_dump_csr(trans, &data);
	/* data is already pointing to the next section */

	if (trans_pcie->fw_mon_page) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		data->len = cpu_to_le32(trans_pcie->fw_mon_size +
					sizeof(*fw_mon_data));
		fw_mon_data = (void *)data->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR));

		/*
		 * The firmware is now asserted, it won't write anything to
		 * the buffer. CPU can take ownership to fetch the data.
		 * The buffer will be handed back to the device before the
		 * firmware will be restarted.
		 */
		dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys,
					trans_pcie->fw_mon_size,
					DMA_FROM_DEVICE);
		memcpy(fw_mon_data->data, page_address(trans_pcie->fw_mon_page),
		       trans_pcie->fw_mon_size);

		len += sizeof(*data) + sizeof(*fw_mon_data) +
			trans_pcie->fw_mon_size;
	}

	dump_data->len = len;

	return dump_data;
}

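/* PCIe implementation of the transport ops used by the op_mode layer */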
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.dump_data = iwl_trans_pcie_dump_data,
};

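/* allocate and initialize the PCIe transport: PCI enable, DMA masks,
 * BAR mapping, MSI, command pool, ICT and IRQ registration */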
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans) {
		err = -ENOMEM;
		goto out;
	}

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_lockdep_init(trans);
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool) {
		err = -ENOMEM;
		goto out_pci_disable_msi;
	}

	/* capture the return code so a failure here is propagated via
	 * ERR_PTR() instead of silently reusing a stale (possibly 0) err */
	err = iwl_pcie_alloc_ict(trans);
	if (err)
		goto out_free_cmd_pool;

	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
out:
	return ERR_PTR(err);
}