/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _PCI_H_
#define _PCI_H_

#include <linux/interrupt.h>

#include "hw.h"
#include "ce.h"

/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60

/*
 * Maximum number of bytes that can be handled atomically by
 * DiagRead/DiagWrite.
 */
#define DIAG_TRANSFER_LIMIT 2048

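/*
 * Larger transfers are expected to be split into chunks of at most
 * DIAG_TRANSFER_LIMIT bytes. A minimal sketch, assuming a diag read helper
 * along the lines of the one in pci.c:
 *
 *      while (remaining) {
 *              u32 nbytes = min_t(u32, remaining, DIAG_TRANSFER_LIMIT);
 *
 *              ret = ath10k_pci_diag_read_mem(ar, address, buf, nbytes);
 *              if (ret)
 *                      break;
 *
 *              address += nbytes;
 *              buf += nbytes;
 *              remaining -= nbytes;
 *      }
 */
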
struct bmi_xfer {
        bool tx_done;
        bool rx_done;
        bool wait_for_resp;
        u32 resp_len;
};

/*
 * PCI-specific Target state
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 *
 * Much of this may be of interest to the Host, so
 * HOST_INTEREST->hi_interconnect_state points here
 * (and all members are 32-bit quantities in order to
 * facilitate Host access). In particular, Host software is
 * required to initialize pipe_cfg_addr and svc_to_pipe_map.
 */
struct pcie_state {
        /* Pipe configuration Target address */
        /* NB: ce_pipe_config[CE_COUNT] */
        u32 pipe_cfg_addr;

        /* Service to pipe map Target address */
        /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
        u32 svc_to_pipe_map;

        /* number of MSI interrupts requested */
        u32 msi_requested;

        /* number of MSI interrupts granted */
        u32 msi_granted;

        /* Message Signalled Interrupt address */
        u32 msi_addr;

        /* Base data */
        u32 msi_data;

        /*
         * Data for firmware interrupt;
         * MSI data for other interrupts are
         * in various SoC registers
         */
        u32 msi_fw_intr_data;

        /* PCIE_PWR_METHOD_* */
        u32 power_mgmt_method;

        /* PCIE_CONFIG_FLAG_* */
        u32 config_flags;
};
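
/*
 * A rough host-side sketch of how this structure gets populated (the actual
 * sequence lives in ath10k_pci_init_config() in pci.c; the variable names
 * below are illustrative only): the Host writes the Target-space addresses
 * of its pipe configuration table and service map into the structure, e.g.
 *
 *      struct pcie_state state = {};
 *
 *      state.pipe_cfg_addr   = pipe_cfg_targ_addr;
 *      state.svc_to_pipe_map = svc_to_pipe_map_targ_addr;
 *
 * and then copies it to the Target address published via
 * HOST_INTEREST->hi_interconnect_state using the diagnostic access path.
 */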

/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001

/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0
/*
 * Configuration information for a Copy Engine pipe.
 * Passed from Host to Target during startup (one per CE).
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 */
struct ce_pipe_config {
        u32 pipenum;
        u32 pipedir;
        u32 nentries;
        u32 nbytes_max;
        u32 flags;
        u32 reserved;
};
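
/*
 * Illustrative entry only (hypothetical values; the real per-target tables
 * are defined in pci.c):
 *
 *      static const struct ce_pipe_config example_pipe = {
 *              .pipenum    = 1,
 *              .pipedir    = PIPEDIR_IN,
 *              .nentries   = 32,
 *              .nbytes_max = 512,
 *              .flags      = CE_ATTR_FLAGS,
 *              .reserved   = 0,
 *      };
 */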

/*
 * Directions for interconnect pipe configuration.
 * These definitions may be used during configuration and are shared
 * between Host and Target.
 *
 * Pipe Directions are relative to the Host, so PIPEDIR_IN means
 * "coming IN over air through Target to Host" as with a WiFi Rx operation.
 * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
 * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
 * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
 * over the interconnect.
 */
#define PIPEDIR_NONE    0
#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT     2  /* Host-->Target, WiFi Tx direction */
#define PIPEDIR_INOUT   3  /* bidirectional */

/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
        u32 service_id;
        u32 pipedir;
        u32 pipenum;
};
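
/*
 * Illustrative mapping entries only (hypothetical pipe numbers; the real
 * per-target tables are defined in pci.c and use the ATH10K_HTC_SVC_ID_*
 * values from htc.h), e.g.:
 *
 *      { ATH10K_HTC_SVC_ID_WMI_CONTROL, PIPEDIR_OUT, 3 },
 *      { ATH10K_HTC_SVC_ID_WMI_CONTROL, PIPEDIR_IN,  2 },
 */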

enum ath10k_pci_features {
        ATH10K_PCI_FEATURE_MSI_X = 0,
        ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,

        /* keep last */
        ATH10K_PCI_FEATURE_COUNT
};

/* Per-pipe state. */
struct ath10k_pci_pipe {
        /* Handle of underlying Copy Engine */
        struct ath10k_ce_pipe *ce_hdl;

        /* Our pipe number; facilitates use of pipe_info ptrs. */
        u8 pipe_num;

        /* Convenience back pointer to hif_ce_state. */
        struct ath10k *hif_ce_state;

        size_t buf_sz;

        /* protects compl_free and num_send_allowed */
        spinlock_t pipe_lock;

        struct ath10k_pci *ar_pci;
        struct tasklet_struct intr;
};

struct ath10k_pci {
        struct pci_dev *pdev;
        struct device *dev;
        struct ath10k *ar;
        void __iomem *mem;

        DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

        /*
         * Number of MSI interrupts granted, 0 --> using legacy PCI line
         * interrupts.
         */
        int num_msi_intrs;

        struct tasklet_struct intr_tq;
        struct tasklet_struct msi_fw_err;
        struct tasklet_struct early_irq_tasklet;

        int started;

        atomic_t keep_awake_count;
        bool verified_awake;

        struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];

        struct ath10k_hif_cb msg_callbacks_current;

        /* Copy Engine used for Diagnostic Accesses */
        struct ath10k_ce_pipe *ce_diag;

        /* FIXME: document what this really protects */
        spinlock_t ce_lock;

        /* Map CE id to ce_state */
        struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};

static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
        return ar->hif.priv;
}

static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}

static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}

#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */

#define BAR_NUM 0

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/*
 * TODO: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address Space to CE
 * Address Space. As part of this process, we conservatively fetch the current
 * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
 * for this device; but that's not guaranteed.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
        (((ioread32((pci_addr) + (SOC_CORE_BASE_ADDRESS | \
          CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
         0x100000 | ((addr) & 0xfffff))
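
/*
 * Typical use (as in the diagnostic accessors in pci.c): translate a Target
 * CPU address before programming the diagnostic Copy Engine, e.g.
 *
 *      ce_addr = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
 */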

/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10

/*
 * This API allows the Host to access Target registers directly
 * and relatively efficiently over PCIe.
 * This allows the Host to avoid extra overhead associated with
 * sending a message to firmware and waiting for a response message
 * from firmware, as is done on other interconnects.
 *
 * Yet there is some complexity with direct accesses because the
 * Target's power state is not known a priori. The Host must issue
 * special PCIe reads/writes in order to explicitly wake the Target
 * and to verify that it is awake and will remain awake.
 *
 * Usage:
 *
 *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
 *   These calls must be bracketed by ath10k_pci_wake and
 *   ath10k_pci_sleep. A single wake/sleep pair is adequate for
 *   multiple read/write operations.
 *
 *   Use ath10k_pci_wake to put the Target in a state in
 *   which it is legal for the Host to directly access it. This
 *   may involve waking the Target from a low power state, which
 *   may take up to 2 ms!
 *
 *   Use ath10k_pci_sleep to tell the Target that as far as
 *   this code path is concerned, it no longer needs to remain
 *   directly accessible. Wake/sleep calls are reference counted, so
 *   multiple code paths may issue them against a single Target.
 */
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
                                      u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        iowrite32(value, ar_pci->mem + offset);
}

static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ioread32(ar_pci->mem + offset);
}

static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);

static inline int ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                return ath10k_do_pci_wake(ar);

        return 0;
}

static inline void ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
}

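/*
 * A minimal sketch of the direct-access pattern described above: one
 * wake/sleep pair brackets any number of register accesses (the register
 * offset here is only an example; error handling trimmed for brevity):
 *
 *      int ret;
 *      u32 val;
 *
 *      ret = ath10k_pci_wake(ar);
 *      if (ret)
 *              return ret;
 *
 *      val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
 *      ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
 *
 *      ath10k_pci_sleep(ar);
 */
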
#endif /* _PCI_H_ */