Commit | Line | Data |
---|---|---|
5e3dd157 KV |
1 | /* |
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | |
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | |
4 | * | |
5 | * Permission to use, copy, modify, and/or distribute this software for any | |
6 | * purpose with or without fee is hereby granted, provided that the above | |
7 | * copyright notice and this permission notice appear in all copies. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
16 | */ | |
17 | ||
18 | #include <linux/pci.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/interrupt.h> | |
21 | #include <linux/spinlock.h> | |
650b91fb | 22 | #include <linux/bitops.h> |
5e3dd157 KV |
23 | |
24 | #include "core.h" | |
25 | #include "debug.h" | |
26 | ||
27 | #include "targaddrs.h" | |
28 | #include "bmi.h" | |
29 | ||
30 | #include "hif.h" | |
31 | #include "htc.h" | |
32 | ||
33 | #include "ce.h" | |
34 | #include "pci.h" | |
35 | ||
/* Reset strategy selected via the "reset_mode" module parameter below. */
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,		/* driver picks warm or cold reset */
	ATH10K_PCI_RESET_WARM_ONLY = 1,		/* never attempt a cold reset */
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
/* PCI device IDs this driver binds to; terminated by the zero entry. */
static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{0}
};
65 | ||
/* (device id, chip id revision) pairs the driver accepts; anything else
 * is rejected at probe time.
 */
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};
96 | ||
/* Forward declarations for helpers referenced (e.g. from the CE config
 * tables below) before their definitions later in this file.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
5e3dd157 | 114 | |
/* Host-side Copy Engine attributes: ring sizes, max transfer sizes and
 * send/recv completion callbacks for each CE pipe.
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
219 | ||
/* Target firmware's Copy Engine configuration.
 * Sent to the target at startup; all fields are little-endian on the wire.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 target->host packtlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It not necessary to send target wlan configuration for CE10 & CE11
	 * as these CEs are not actively used in target.
	 */
};
328 | ||
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 * Each entry is { service id, pipe direction, CE pipe number },
 * all little-endian on the wire.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
424 | ||
77258d40 MK |
425 | static bool ath10k_pci_is_awake(struct ath10k *ar) |
426 | { | |
427 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
428 | u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
429 | RTC_STATE_ADDRESS); | |
430 | ||
431 | return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; | |
432 | } | |
433 | ||
/* Request a device wakeup by writing the SOC_WAKE register.
 *
 * Only issues the wake request; it does not wait for the device to become
 * awake (see ath10k_pci_wake_wait()). Caller must hold ps_lock.
 */
static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}
447 | ||
/* Allow the device to go to sleep by resetting the SOC_WAKE register and
 * clear the cached awake state. Caller must hold ps_lock.
 */
static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}
462 | ||
463 | static int ath10k_pci_wake_wait(struct ath10k *ar) | |
464 | { | |
465 | int tot_delay = 0; | |
466 | int curr_delay = 5; | |
467 | ||
468 | while (tot_delay < PCIE_WAKE_TIMEOUT) { | |
39b91b81 MK |
469 | if (ath10k_pci_is_awake(ar)) { |
470 | if (tot_delay > PCIE_WAKE_LATE_US) | |
471 | ath10k_warn(ar, "device wakeup took %d ms which is unusally long, otherwise it works normally.\n", | |
472 | tot_delay / 1000); | |
77258d40 | 473 | return 0; |
39b91b81 | 474 | } |
77258d40 MK |
475 | |
476 | udelay(curr_delay); | |
477 | tot_delay += curr_delay; | |
478 | ||
479 | if (curr_delay < 50) | |
480 | curr_delay += 5; | |
481 | } | |
482 | ||
483 | return -ETIMEDOUT; | |
484 | } | |
485 | ||
/* Unconditionally wake the device, bypassing the refcounted wake/sleep
 * scheme. Used on chips without the PCI power-save scheme (pci_ps == 0);
 * a no-op (returns 0) when pci_ps is enabled since regular wake handling
 * applies then.
 *
 * Returns 0 on success or the ath10k_pci_wake_wait() error.
 */
static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
511 | ||
/* Unconditionally put the device to sleep, bypassing the refcounted
 * wake/sleep scheme. Counterpart of ath10k_pci_force_wake().
 */
static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
526 | ||
/* Take a refcounted wakeup reference on the device.
 *
 * Every successful call must be balanced by ath10k_pci_sleep(). No-op
 * (returns 0) when the PCI power-save scheme is disabled (pci_ps == 0).
 *
 * Returns 0 on success or the ath10k_pci_wake_wait() error; on failure no
 * reference is taken.
 */
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0); /* overflow check */
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
561 | ||
/* Drop a wakeup reference taken by ath10k_pci_wake().
 *
 * The device is not put to sleep immediately; instead the ps_timer is armed
 * so the device stays awake for a grace period in case another access
 * follows shortly. No-op when the PCI power-save scheme is disabled.
 */
static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
586 | ||
/* Grace-period timer callback: put the device to sleep only if no wakeup
 * references were taken again since ath10k_pci_sleep() armed the timer.
 */
static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
606 | ||
/* Synchronously put the device to sleep: cancel the pending grace-period
 * timer and force the sleep transition now. With the power-save scheme
 * disabled this falls back to ath10k_pci_force_sleep().
 */
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0); /* all refs should be dropped */
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
624 | ||
/* PCI bus-ops MMIO write: bounds-check the offset against the mapped BAR
 * length, wake the device, write the register, then drop the wake ref.
 * Out-of-bounds or wake failures are logged and the write is dropped.
 */
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}
646 | ||
/* PCI bus-ops MMIO read: bounds-check the offset against the mapped BAR
 * length, wake the device, read the register, then drop the wake ref.
 * Returns 0 on an out-of-bounds offset and 0xffffffff on wake failure.
 */
static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}
671 | ||
4ddb3299 RM |
672 | inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) |
673 | { | |
674 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
675 | ||
676 | ar_pci->bus_ops->write32(ar, offset, value); | |
677 | } | |
678 | ||
679 | inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) | |
680 | { | |
681 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
682 | ||
683 | return ar_pci->bus_ops->read32(ar, offset); | |
684 | } | |
685 | ||
77258d40 MK |
686 | u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) |
687 | { | |
688 | return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); | |
689 | } | |
690 | ||
691 | void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) | |
692 | { | |
693 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); | |
694 | } | |
695 | ||
696 | u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) | |
697 | { | |
698 | return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); | |
699 | } | |
700 | ||
701 | void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) | |
702 | { | |
703 | ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); | |
704 | } | |
705 | ||
f52f5171 | 706 | bool ath10k_pci_irq_pending(struct ath10k *ar) |
e539887b MK |
707 | { |
708 | u32 cause; | |
709 | ||
710 | /* Check if the shared legacy irq is for us */ | |
711 | cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + | |
712 | PCIE_INTR_CAUSE_ADDRESS); | |
713 | if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) | |
714 | return true; | |
715 | ||
716 | return false; | |
717 | } | |
718 | ||
/* Disable legacy interrupts and clear any latched cause bits. */
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
734 | ||
/* Enable legacy interrupts for firmware and all copy-engine sources. */
void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
746 | ||
403d627b | 747 | static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) |
ab977bd0 | 748 | { |
ab977bd0 MK |
749 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
750 | ||
cfe9011a | 751 | if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) |
403d627b | 752 | return "msi"; |
d8bb26b9 KV |
753 | |
754 | return "legacy"; | |
ab977bd0 MK |
755 | } |
756 | ||
/* Allocate one rx skb, DMA-map it and post it to the pipe's copy engine.
 *
 * On CE post failure the mapping and skb are released again, so no
 * resources leak on any error path.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on a DMA
 * mapping error, or the __ath10k_ce_rx_post_buf() error (e.g. -ENOSPC
 * when the ring is full).
 */
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the mapping in the skb control block for later unmap. */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ar_pci->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
795 | ||
/* Refill a single rx pipe with as many buffers as its CE destination ring
 * has free entries. On a transient failure the rx_post_retry timer is armed
 * to try again later; -ENOSPC just stops the refill without a warning.
 */
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ar_pci->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ar_pci->ce_lock);

	/* NOTE(review): loops while num >= 0, i.e. attempts one post beyond
	 * the snapshotted free count; the CE returns -ENOSPC when full. */
	while (num >= 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}
826 | ||
f52f5171 | 827 | void ath10k_pci_rx_post(struct ath10k *ar) |
728f95ee MK |
828 | { |
829 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
830 | int i; | |
831 | ||
728f95ee | 832 | for (i = 0; i < CE_COUNT; i++) |
ab4e3db0 | 833 | ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); |
728f95ee MK |
834 | } |
835 | ||
/* rx_post_retry timer callback: retry refilling all rx pipes. */
void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (struct ath10k *)ptr;

	ath10k_pci_rx_post(ar);
}
842 | ||
/* Translate a target CPU address into a CE (diagnostic window) address.
 *
 * The upper window bits come from a chip-specific register (CORE_CTRL on
 * QCA988X-family chips, the PCIe BAR register on QCA99X0-family chips);
 * the low 20 bits of @addr are kept and the window-enable bit 0x100000
 * is set.
 */
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					 CORE_CTRL_ADDRESS) &
		       0x7ff) << 21;
		break;
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA4019:
		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
		break;
	}

	val |= 0x100000 | (addr & 0xfffff);
	return val;
}
867 | ||
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/* Held for the whole transfer: serializes diag CE users */
	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	/* One chunk-sized bounce buffer, reused for every iteration */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       alloc_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, alloc_nbytes);

	remaining_bytes = nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* nbytes is reused here as the per-chunk size */
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		/* Post the bounce buffer as the RX destination */
		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		/* NOTE(review): conversion is applied on every loop pass,
		 * i.e. also to already-converted addresses after the first
		 * chunk — presumably benign since only bits above 20 are
		 * rewritten; confirm against target address map.
		 */
		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		/* Busy-wait (with ms sleeps) for send completion */
		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Busy-wait for the data to land in the bounce buffer */
		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
						!= 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		/* Sanity check: completion must refer to our bounce buffer */
		if (*buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;

		/* NOTE(review): ret is always 0 here (failures jump to
		 * done above), so this branch is effectively dead.
		 */
		if (ret) {
			ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
				    address, ret);
			break;
		}
		/* Copy this chunk out of the reused bounce buffer */
		memcpy(data, data_buf, nbytes);

		address += nbytes;
		data += nbytes;
	}

done:

	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
992 | ||
3d29a3e0 KV |
993 | static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) |
994 | { | |
0fdc14e4 MK |
995 | __le32 val = 0; |
996 | int ret; | |
997 | ||
998 | ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); | |
999 | *value = __le32_to_cpu(val); | |
1000 | ||
1001 | return ret; | |
3d29a3e0 KV |
1002 | } |
1003 | ||
1004 | static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, | |
1005 | u32 src, u32 len) | |
1006 | { | |
1007 | u32 host_addr, addr; | |
1008 | int ret; | |
1009 | ||
1010 | host_addr = host_interest_item_address(src); | |
1011 | ||
1012 | ret = ath10k_pci_diag_read32(ar, host_addr, &addr); | |
1013 | if (ret != 0) { | |
7aa7a72a | 1014 | ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", |
3d29a3e0 KV |
1015 | src, ret); |
1016 | return ret; | |
1017 | } | |
1018 | ||
1019 | ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); | |
1020 | if (ret != 0) { | |
7aa7a72a | 1021 | ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", |
3d29a3e0 KV |
1022 | addr, len, ret); |
1023 | return ret; | |
1024 | } | |
1025 | ||
1026 | return 0; | |
1027 | } | |
1028 | ||
/* Convenience wrapper: read a named Host Interest field (HI_ITEM expands
 * the struct-field name to its offset).
 */
#define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
/* Diagnostic write of `nbytes` from `data` into target memory at
 * `address` (target CPU virtual address).  Mirror image of
 * ath10k_pci_diag_read_mem(): data is staged in one DMA-coherent bounce
 * buffer and pushed chunk-wise through the diagnostic copy engine.
 */
int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
			      const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	/* Held for the whole transfer: serializes diag CE users */
	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	/* Unlike the read path this allocates the FULL length at once */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		/* Busy-wait (with ms sleeps) for send completion */
		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Busy-wait for the target-side receive to complete */
		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
						!= 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		/* Sanity check: completion must refer to the target address */
		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	/* NOTE(review): on failure `address` has already been converted
	 * (and possibly advanced), so the logged address is not the
	 * caller-supplied one.
	 */
	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
1153 | ||
0fdc14e4 MK |
1154 | static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) |
1155 | { | |
1156 | __le32 val = __cpu_to_le32(value); | |
1157 | ||
1158 | return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); | |
1159 | } | |
1160 | ||
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* Drain completions into a local list first, then run the HTC
	 * completion handler outside the CE completion loop.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}
1180 | ||
/* Generic RX completion processing for a CE pipe: unmap each completed
 * buffer, hand it to `callback`, then repost fresh RX buffers.
 */
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	/* Collect all completed buffers first, deliver afterwards */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* Firmware claims more data than the skb can hold: drop */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	/* Replace the buffers just consumed */
	ath10k_pci_rx_post_pipe(pipe_info);
}
1223 | ||
/* HTT (CE5) RX completion processing.  Unlike the generic path the
 * buffers are NOT unmapped and reposted; they are synced for the CPU,
 * delivered, then rewound and handed back to the device in place.
 */
static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		/* Firmware claims more data than the skb can hold: skip */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		/* Buffer stays mapped; only sync it for CPU access */
		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		/* Deliver, then restore the skb to its pristine empty state
		 * (callback may have pulled headers) so it can be reused.
		 */
		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/*let device gain the buffer again*/
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	/* Return all processed entries to the ring in one write */
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}
1280 | ||
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
1286 | ||
/* HTC RX callback variant that also services CE4 (see comment below). */
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
1296 | ||
/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}
/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		/* HTT TX buffers are mapped here in the PCI layer, so they
		 * are also unmapped here (unlike the HTC TX path).
		 */
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}
1322 | ||
/* Strip the HTC header and pass the remaining HTT message up. */
static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
1328 | ||
/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
1339 | ||
/* Send a scatter-gather list of `n_items` fragments down one CE pipe.
 * All but the last fragment are queued with CE_SEND_FLAG_GATHER; the
 * last one triggers the actual transfer.  On failure, every fragment
 * already queued in this call is reverted.
 *
 * Returns 0 on success, -ENOBUFS if the ring lacks space, or the CE
 * send error.
 */
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* All fragments must fit, or none are queued */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	/* Final fragment: no GATHER flag, so the CE fires the transfer */
	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	/* Undo the `i` sends already queued in this call */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}
1408 | ||
/* HIF wrapper around the diagnostic memory read. */
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
1414 | ||
/* Return the number of free source-ring entries on the given pipe. */
u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
1423 | ||
/* Dump the firmware crash register area (hi_failure_state) to the log
 * and, when available, into `crash_data` for later retrieval via
 * debugfs.  Caller must hold ar->data_lock.
 */
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	/* The 4-at-a-time print below requires a multiple of four */
	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	/* Stored in wire (little-endian) order */
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}
1457 | ||
/* Handle a firmware crash: bump counters, log a register dump tagged
 * with a fresh UUID, and schedule a device restart.
 */
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	/* May return NULL (e.g. debugfs disabled); handled below */
	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	/* Recovery happens asynchronously in the restart worker */
	queue_work(ar->workqueue, &ar->restart_work);
}
1482 | ||
/* Poll a pipe for send completions.  When `force` is zero, skip the
 * (relatively expensive) poll if at least half the ring is still free.
 */
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
1508 | ||
/* Stop deferred interrupt processing: kill the interrupt tasklet and
 * cancel any pending RX-repost retry timer.
 */
void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_kill(&ar_pci->intr_tq);

	del_timer_sync(&ar_pci->rx_post_retry);
}
1517 | ||
/* Resolve an HTC service id to its upload (host->target) and download
 * (target->host) CE pipe numbers using the static service map.
 *
 * Returns 0 on success or -ENOENT if the service has no complete
 * UL+DL mapping (both trigger a WARN).
 */
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		/* Map entries are stored little-endian (shared with fw) */
		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:	/* target -> host */
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:	/* host -> target */
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}
1562 | ||
/* Return the pipes used for the reserved HTC control service. */
void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	/* Cannot fail: the control service is always in the map */
	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
1572 | ||
/* Mask firmware-generated MSI/irq at the SoC core, where the chip
 * supports it (clears CORE_CTRL_PCIE_REG_31_MASK on pre-99X0 chips).
 */
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 *  to mask irq/MSI.
		 */
		break;
	}
}
1598 | ||
/* Counterpart of ath10k_pci_irq_msi_fw_mask(): re-enable fw interrupts
 * by setting CORE_CTRL_PCIE_REG_31_MASK on chips that support it.
 */
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 *  to unmask irq/MSI.
		 */
		break;
	}
}
5e3dd157 | 1624 | |
/* Disable all interrupt sources: CE interrupts, legacy PCI irq, and
 * firmware MSI.
 */
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}
1631 | ||
/* Wait for any in-flight interrupt handler to finish. */
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	synchronize_irq(ar_pci->pdev->irq);
}
1638 | ||
/* Enable all interrupt sources (inverse of ath10k_pci_irq_disable). */
static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}
1645 | ||
/* HIF start: enable interrupts, prime the RX rings and restore the
 * PCIe link control value saved at probe time.
 */
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	/* Restore ASPM-related link control saved earlier during probe */
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}
1660 | ||
/* Free all skbs still posted on a pipe's RX (dest) ring: unmap the DMA
 * buffer and drop each one.  Used at shutdown.
 */
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	/* No RX buffers were ever posted on buf_sz == 0 pipes */
	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
1692 | ||
/* Complete all skbs still queued on a pipe's TX (src) ring by running
 * the normal HTC tx completion handler for each.  Used at shutdown.
 */
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		/* Let HTC free/unmap the buffer through its normal path */
		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
1723 | ||
1724 | /* | |
1725 | * Cleanup residual buffers for device shutdown: | |
1726 | * buffers that were enqueued for receive | |
1727 | * buffers that were to be sent | |
1728 | * Note: Buffers that had completed but which were | |
1729 | * not yet processed are on a completion queue. They | |
1730 | * are handled when the completion thread shuts down. | |
1731 | */ | |
1732 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar) | |
1733 | { | |
1734 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
1735 | int pipe_num; | |
1736 | ||
fad6ed78 | 1737 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
87263e5b | 1738 | struct ath10k_pci_pipe *pipe_info; |
5e3dd157 KV |
1739 | |
1740 | pipe_info = &ar_pci->pipe_info[pipe_num]; | |
1741 | ath10k_pci_rx_pipe_cleanup(pipe_info); | |
1742 | ath10k_pci_tx_pipe_cleanup(pipe_info); | |
1743 | } | |
1744 | } | |
1745 | ||
f52f5171 | 1746 | void ath10k_pci_ce_deinit(struct ath10k *ar) |
5e3dd157 | 1747 | { |
25d0dbcb | 1748 | int i; |
5e3dd157 | 1749 | |
25d0dbcb MK |
1750 | for (i = 0; i < CE_COUNT; i++) |
1751 | ath10k_ce_deinit_pipe(ar, i); | |
5e3dd157 KV |
1752 | } |
1753 | ||
/* Stop deferred processing and reclaim all buffers still held in the
 * copy engine rings.
 */
void ath10k_pci_flush(struct ath10k *ar)
{
	/* Kill the tasklet first so cleanup cannot race with CE handlers. */
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}
5e3dd157 | 1759 | |
5e3dd157 KV |
/* HIF stop hook: reset the chip, mask and sync interrupts, then flush
 * all outstanding buffers. The reset must come first (see comment).
 */
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possible corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);

	/* Sanity check: nothing should still hold a device-wake reference
	 * once the interface is stopped.
	 */
	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
1788 | ||
f52f5171 RM |
1789 | int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, |
1790 | void *req, u32 req_len, | |
1791 | void *resp, u32 *resp_len) | |
5e3dd157 KV |
1792 | { |
1793 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2aa39115 MK |
1794 | struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; |
1795 | struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; | |
1796 | struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; | |
1797 | struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; | |
5e3dd157 KV |
1798 | dma_addr_t req_paddr = 0; |
1799 | dma_addr_t resp_paddr = 0; | |
1800 | struct bmi_xfer xfer = {}; | |
1801 | void *treq, *tresp = NULL; | |
1802 | int ret = 0; | |
1803 | ||
85622cde MK |
1804 | might_sleep(); |
1805 | ||
5e3dd157 KV |
1806 | if (resp && !resp_len) |
1807 | return -EINVAL; | |
1808 | ||
1809 | if (resp && resp_len && *resp_len == 0) | |
1810 | return -EINVAL; | |
1811 | ||
1812 | treq = kmemdup(req, req_len, GFP_KERNEL); | |
1813 | if (!treq) | |
1814 | return -ENOMEM; | |
1815 | ||
1816 | req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); | |
1817 | ret = dma_mapping_error(ar->dev, req_paddr); | |
5e55e3cb MK |
1818 | if (ret) { |
1819 | ret = -EIO; | |
5e3dd157 | 1820 | goto err_dma; |
5e55e3cb | 1821 | } |
5e3dd157 KV |
1822 | |
1823 | if (resp && resp_len) { | |
1824 | tresp = kzalloc(*resp_len, GFP_KERNEL); | |
1825 | if (!tresp) { | |
1826 | ret = -ENOMEM; | |
1827 | goto err_req; | |
1828 | } | |
1829 | ||
1830 | resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, | |
1831 | DMA_FROM_DEVICE); | |
1832 | ret = dma_mapping_error(ar->dev, resp_paddr); | |
5e55e3cb | 1833 | if (ret) { |
22baa980 | 1834 | ret = -EIO; |
5e3dd157 | 1835 | goto err_req; |
5e55e3cb | 1836 | } |
5e3dd157 KV |
1837 | |
1838 | xfer.wait_for_resp = true; | |
1839 | xfer.resp_len = 0; | |
1840 | ||
728f95ee | 1841 | ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); |
5e3dd157 KV |
1842 | } |
1843 | ||
5e3dd157 KV |
1844 | ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); |
1845 | if (ret) | |
1846 | goto err_resp; | |
1847 | ||
85622cde MK |
1848 | ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer); |
1849 | if (ret) { | |
5e3dd157 KV |
1850 | u32 unused_buffer; |
1851 | unsigned int unused_nbytes; | |
1852 | unsigned int unused_id; | |
1853 | ||
5e3dd157 KV |
1854 | ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, |
1855 | &unused_nbytes, &unused_id); | |
1856 | } else { | |
1857 | /* non-zero means we did not time out */ | |
1858 | ret = 0; | |
1859 | } | |
1860 | ||
1861 | err_resp: | |
1862 | if (resp) { | |
1863 | u32 unused_buffer; | |
1864 | ||
1865 | ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); | |
1866 | dma_unmap_single(ar->dev, resp_paddr, | |
1867 | *resp_len, DMA_FROM_DEVICE); | |
1868 | } | |
1869 | err_req: | |
1870 | dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); | |
1871 | ||
1872 | if (ret == 0 && resp_len) { | |
1873 | *resp_len = min(*resp_len, xfer.resp_len); | |
1874 | memcpy(resp, tresp, xfer.resp_len); | |
1875 | } | |
1876 | err_dma: | |
1877 | kfree(treq); | |
1878 | kfree(tresp); | |
1879 | ||
1880 | return ret; | |
1881 | } | |
1882 | ||
5440ce25 | 1883 | static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) |
5e3dd157 | 1884 | { |
5440ce25 | 1885 | struct bmi_xfer *xfer; |
5440ce25 | 1886 | |
765952e4 | 1887 | if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer)) |
5440ce25 | 1888 | return; |
5e3dd157 | 1889 | |
2374b186 | 1890 | xfer->tx_done = true; |
5e3dd157 KV |
1891 | } |
1892 | ||
5440ce25 | 1893 | static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) |
5e3dd157 | 1894 | { |
7aa7a72a | 1895 | struct ath10k *ar = ce_state->ar; |
5440ce25 | 1896 | struct bmi_xfer *xfer; |
5440ce25 | 1897 | unsigned int nbytes; |
5440ce25 | 1898 | |
24d9ef5e RM |
1899 | if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, |
1900 | &nbytes)) | |
5440ce25 | 1901 | return; |
5e3dd157 | 1902 | |
04ed9dfe MK |
1903 | if (WARN_ON_ONCE(!xfer)) |
1904 | return; | |
1905 | ||
5e3dd157 | 1906 | if (!xfer->wait_for_resp) { |
7aa7a72a | 1907 | ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); |
5e3dd157 KV |
1908 | return; |
1909 | } | |
1910 | ||
1911 | xfer->resp_len = nbytes; | |
2374b186 | 1912 | xfer->rx_done = true; |
5e3dd157 KV |
1913 | } |
1914 | ||
85622cde MK |
1915 | static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, |
1916 | struct ath10k_ce_pipe *rx_pipe, | |
1917 | struct bmi_xfer *xfer) | |
1918 | { | |
1919 | unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; | |
1920 | ||
1921 | while (time_before_eq(jiffies, timeout)) { | |
1922 | ath10k_pci_bmi_send_done(tx_pipe); | |
1923 | ath10k_pci_bmi_recv_data(rx_pipe); | |
1924 | ||
2374b186 | 1925 | if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) |
85622cde MK |
1926 | return 0; |
1927 | ||
1928 | schedule(); | |
1929 | } | |
5e3dd157 | 1930 | |
85622cde MK |
1931 | return -ETIMEDOUT; |
1932 | } | |
5e3dd157 KV |
1933 | |
1934 | /* | |
1935 | * Send an interrupt to the device to wake up the Target CPU | |
1936 | * so it has an opportunity to notice any changed state. | |
1937 | */ | |
1938 | static int ath10k_pci_wake_target_cpu(struct ath10k *ar) | |
1939 | { | |
9e264945 | 1940 | u32 addr, val; |
5e3dd157 | 1941 | |
9e264945 MK |
1942 | addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; |
1943 | val = ath10k_pci_read32(ar, addr); | |
1944 | val |= CORE_CTRL_CPU_INTR_MASK; | |
1945 | ath10k_pci_write32(ar, addr, val); | |
5e3dd157 | 1946 | |
1d2b48d6 | 1947 | return 0; |
5e3dd157 KV |
1948 | } |
1949 | ||
d63955b3 MK |
/* Return the number of IRAM banks for early allocation, derived from
 * the PCI device id and, for QCA6164/QCA6174, the chip revision. The
 * result is programmed into hi_early_alloc by ath10k_pci_init_config().
 */
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9888_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		/* Bank count differs per QCA6174 silicon revision. */
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 2;
	}

	/* Unknown device/revision: fall back to one bank, but warn. */
	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}
1984 | ||
4ddb3299 RM |
1985 | static int ath10k_bus_get_num_banks(struct ath10k *ar) |
1986 | { | |
1987 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
1988 | ||
1989 | return ar_pci->bus_ops->get_num_banks(ar); | |
1990 | } | |
1991 | ||
f52f5171 | 1992 | int ath10k_pci_init_config(struct ath10k *ar) |
5e3dd157 KV |
1993 | { |
1994 | u32 interconnect_targ_addr; | |
1995 | u32 pcie_state_targ_addr = 0; | |
1996 | u32 pipe_cfg_targ_addr = 0; | |
1997 | u32 svc_to_pipe_map = 0; | |
1998 | u32 pcie_config_flags = 0; | |
1999 | u32 ealloc_value; | |
2000 | u32 ealloc_targ_addr; | |
2001 | u32 flag2_value; | |
2002 | u32 flag2_targ_addr; | |
2003 | int ret = 0; | |
2004 | ||
2005 | /* Download to Target the CE Config and the service-to-CE map */ | |
2006 | interconnect_targ_addr = | |
2007 | host_interest_item_address(HI_ITEM(hi_interconnect_state)); | |
2008 | ||
2009 | /* Supply Target-side CE configuration */ | |
9e264945 MK |
2010 | ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, |
2011 | &pcie_state_targ_addr); | |
5e3dd157 | 2012 | if (ret != 0) { |
7aa7a72a | 2013 | ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); |
5e3dd157 KV |
2014 | return ret; |
2015 | } | |
2016 | ||
2017 | if (pcie_state_targ_addr == 0) { | |
2018 | ret = -EIO; | |
7aa7a72a | 2019 | ath10k_err(ar, "Invalid pcie state addr\n"); |
5e3dd157 KV |
2020 | return ret; |
2021 | } | |
2022 | ||
9e264945 | 2023 | ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + |
5e3dd157 | 2024 | offsetof(struct pcie_state, |
9e264945 MK |
2025 | pipe_cfg_addr)), |
2026 | &pipe_cfg_targ_addr); | |
5e3dd157 | 2027 | if (ret != 0) { |
7aa7a72a | 2028 | ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); |
5e3dd157 KV |
2029 | return ret; |
2030 | } | |
2031 | ||
2032 | if (pipe_cfg_targ_addr == 0) { | |
2033 | ret = -EIO; | |
7aa7a72a | 2034 | ath10k_err(ar, "Invalid pipe cfg addr\n"); |
5e3dd157 KV |
2035 | return ret; |
2036 | } | |
2037 | ||
2038 | ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, | |
5b07e07f | 2039 | target_ce_config_wlan, |
050af069 VT |
2040 | sizeof(struct ce_pipe_config) * |
2041 | NUM_TARGET_CE_CONFIG_WLAN); | |
5e3dd157 KV |
2042 | |
2043 | if (ret != 0) { | |
7aa7a72a | 2044 | ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); |
5e3dd157 KV |
2045 | return ret; |
2046 | } | |
2047 | ||
9e264945 | 2048 | ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + |
5e3dd157 | 2049 | offsetof(struct pcie_state, |
9e264945 MK |
2050 | svc_to_pipe_map)), |
2051 | &svc_to_pipe_map); | |
5e3dd157 | 2052 | if (ret != 0) { |
7aa7a72a | 2053 | ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); |
5e3dd157 KV |
2054 | return ret; |
2055 | } | |
2056 | ||
2057 | if (svc_to_pipe_map == 0) { | |
2058 | ret = -EIO; | |
7aa7a72a | 2059 | ath10k_err(ar, "Invalid svc_to_pipe map\n"); |
5e3dd157 KV |
2060 | return ret; |
2061 | } | |
2062 | ||
2063 | ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, | |
5b07e07f KV |
2064 | target_service_to_ce_map_wlan, |
2065 | sizeof(target_service_to_ce_map_wlan)); | |
5e3dd157 | 2066 | if (ret != 0) { |
7aa7a72a | 2067 | ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); |
5e3dd157 KV |
2068 | return ret; |
2069 | } | |
2070 | ||
9e264945 | 2071 | ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + |
5e3dd157 | 2072 | offsetof(struct pcie_state, |
9e264945 MK |
2073 | config_flags)), |
2074 | &pcie_config_flags); | |
5e3dd157 | 2075 | if (ret != 0) { |
7aa7a72a | 2076 | ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); |
5e3dd157 KV |
2077 | return ret; |
2078 | } | |
2079 | ||
2080 | pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; | |
2081 | ||
9e264945 MK |
2082 | ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr + |
2083 | offsetof(struct pcie_state, | |
2084 | config_flags)), | |
2085 | pcie_config_flags); | |
5e3dd157 | 2086 | if (ret != 0) { |
7aa7a72a | 2087 | ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); |
5e3dd157 KV |
2088 | return ret; |
2089 | } | |
2090 | ||
2091 | /* configure early allocation */ | |
2092 | ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); | |
2093 | ||
9e264945 | 2094 | ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); |
5e3dd157 | 2095 | if (ret != 0) { |
7aa7a72a | 2096 | ath10k_err(ar, "Faile to get early alloc val: %d\n", ret); |
5e3dd157 KV |
2097 | return ret; |
2098 | } | |
2099 | ||
2100 | /* first bank is switched to IRAM */ | |
2101 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & | |
2102 | HI_EARLY_ALLOC_MAGIC_MASK); | |
4ddb3299 | 2103 | ealloc_value |= ((ath10k_bus_get_num_banks(ar) << |
d63955b3 | 2104 | HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & |
5e3dd157 KV |
2105 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); |
2106 | ||
9e264945 | 2107 | ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); |
5e3dd157 | 2108 | if (ret != 0) { |
7aa7a72a | 2109 | ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); |
5e3dd157 KV |
2110 | return ret; |
2111 | } | |
2112 | ||
2113 | /* Tell Target to proceed with initialization */ | |
2114 | flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); | |
2115 | ||
9e264945 | 2116 | ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); |
5e3dd157 | 2117 | if (ret != 0) { |
7aa7a72a | 2118 | ath10k_err(ar, "Failed to get option val: %d\n", ret); |
5e3dd157 KV |
2119 | return ret; |
2120 | } | |
2121 | ||
2122 | flag2_value |= HI_OPTION_EARLY_CFG_DONE; | |
2123 | ||
9e264945 | 2124 | ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); |
5e3dd157 | 2125 | if (ret != 0) { |
7aa7a72a | 2126 | ath10k_err(ar, "Failed to set option val: %d\n", ret); |
5e3dd157 KV |
2127 | return ret; |
2128 | } | |
2129 | ||
2130 | return 0; | |
2131 | } | |
2132 | ||
2727a743 RH |
/* Patch the global CE configuration tables in place: both the host CE 5
 * attributes and the target's pipe config for CE 5 are overridden, plus
 * one service-to-CE mapping entry.
 */
static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for other feature.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &host_ce_config_wlan[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &target_ce_config_wlan[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}
2155 | ||
f52f5171 | 2156 | int ath10k_pci_alloc_pipes(struct ath10k *ar) |
25d0dbcb | 2157 | { |
84cbf3a7 MK |
2158 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2159 | struct ath10k_pci_pipe *pipe; | |
25d0dbcb MK |
2160 | int i, ret; |
2161 | ||
2162 | for (i = 0; i < CE_COUNT; i++) { | |
84cbf3a7 MK |
2163 | pipe = &ar_pci->pipe_info[i]; |
2164 | pipe->ce_hdl = &ar_pci->ce_states[i]; | |
2165 | pipe->pipe_num = i; | |
2166 | pipe->hif_ce_state = ar; | |
2167 | ||
9d9bdbb0 | 2168 | ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); |
25d0dbcb | 2169 | if (ret) { |
7aa7a72a | 2170 | ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", |
25d0dbcb MK |
2171 | i, ret); |
2172 | return ret; | |
2173 | } | |
84cbf3a7 MK |
2174 | |
2175 | /* Last CE is Diagnostic Window */ | |
050af069 | 2176 | if (i == CE_DIAG_PIPE) { |
84cbf3a7 MK |
2177 | ar_pci->ce_diag = pipe->ce_hdl; |
2178 | continue; | |
2179 | } | |
2180 | ||
2181 | pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); | |
25d0dbcb MK |
2182 | } |
2183 | ||
2184 | return 0; | |
2185 | } | |
2186 | ||
f52f5171 | 2187 | void ath10k_pci_free_pipes(struct ath10k *ar) |
25d0dbcb MK |
2188 | { |
2189 | int i; | |
5e3dd157 | 2190 | |
25d0dbcb MK |
2191 | for (i = 0; i < CE_COUNT; i++) |
2192 | ath10k_ce_free_pipe(ar, i); | |
2193 | } | |
5e3dd157 | 2194 | |
f52f5171 | 2195 | int ath10k_pci_init_pipes(struct ath10k *ar) |
5e3dd157 | 2196 | { |
84cbf3a7 | 2197 | int i, ret; |
5e3dd157 | 2198 | |
84cbf3a7 MK |
2199 | for (i = 0; i < CE_COUNT; i++) { |
2200 | ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); | |
25d0dbcb | 2201 | if (ret) { |
7aa7a72a | 2202 | ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", |
84cbf3a7 | 2203 | i, ret); |
25d0dbcb | 2204 | return ret; |
5e3dd157 | 2205 | } |
5e3dd157 KV |
2206 | } |
2207 | ||
5e3dd157 KV |
2208 | return 0; |
2209 | } | |
2210 | ||
5c771e74 | 2211 | static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) |
5e3dd157 | 2212 | { |
5c771e74 MK |
2213 | return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & |
2214 | FW_IND_EVENT_PENDING; | |
2215 | } | |
5e3dd157 | 2216 | |
5c771e74 MK |
2217 | static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) |
2218 | { | |
2219 | u32 val; | |
5e3dd157 | 2220 | |
5c771e74 MK |
2221 | val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); |
2222 | val &= ~FW_IND_EVENT_PENDING; | |
2223 | ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); | |
5e3dd157 KV |
2224 | } |
2225 | ||
fb7caaba MSS |
2226 | static bool ath10k_pci_has_device_gone(struct ath10k *ar) |
2227 | { | |
2228 | u32 val; | |
2229 | ||
2230 | val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); | |
2231 | return (val == 0xffffffff); | |
2232 | } | |
2233 | ||
de01357b MK |
2234 | /* this function effectively clears target memory controller assert line */ |
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	/* Assert the SI0 reset bit via read-modify-write. */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	/* NOTE(review): this read-back appears intended to flush the
	 * posted write before the delay — confirm against HW docs.
	 */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	/* Deassert the reset bit and let the target settle again. */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}
2253 | ||
61c1648b | 2254 | static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) |
fc36e3ff | 2255 | { |
fc36e3ff MK |
2256 | u32 val; |
2257 | ||
b39712ce | 2258 | ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); |
fc36e3ff | 2259 | |
fc36e3ff | 2260 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
61c1648b MK |
2261 | SOC_RESET_CONTROL_ADDRESS); |
2262 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, | |
2263 | val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); | |
2264 | } | |
2265 | ||
2266 | static void ath10k_pci_warm_reset_ce(struct ath10k *ar) | |
2267 | { | |
2268 | u32 val; | |
fc36e3ff | 2269 | |
fc36e3ff MK |
2270 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
2271 | SOC_RESET_CONTROL_ADDRESS); | |
61c1648b | 2272 | |
fc36e3ff MK |
2273 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, |
2274 | val | SOC_RESET_CONTROL_CE_RST_MASK); | |
fc36e3ff | 2275 | msleep(10); |
fc36e3ff MK |
2276 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, |
2277 | val & ~SOC_RESET_CONTROL_CE_RST_MASK); | |
61c1648b MK |
2278 | } |
2279 | ||
2280 | static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) | |
2281 | { | |
2282 | u32 val; | |
2283 | ||
fc36e3ff | 2284 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
61c1648b MK |
2285 | SOC_LF_TIMER_CONTROL0_ADDRESS); |
2286 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + | |
2287 | SOC_LF_TIMER_CONTROL0_ADDRESS, | |
2288 | val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); | |
2289 | } | |
fc36e3ff | 2290 | |
61c1648b MK |
/* Multi-stage warm reset. Ordering is deliberate: the target CPU is
 * halted before the copy engines are reset (see comment below).
 * Returns 0 on success or the error from the final target-init wait.
 */
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	/* Account the reset in the firmware statistics. */
	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse pci-e controller to
	 * the point of bringing host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	/* Second pass: with the CPU halted, reset the copy engines and
	 * bring the CPU and pipes back up.
	 */
	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
2328 | ||
0fc7e270 VT |
/* QCA99X0 soft reset: mask interrupts, then run the full chip reset. */
static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);

	return ath10k_pci_qca99x0_chip_reset(ar);
}
2334 | ||
6e4202c3 VT |
2335 | static int ath10k_pci_safe_chip_reset(struct ath10k *ar) |
2336 | { | |
0fc7e270 VT |
2337 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2338 | ||
2339 | if (!ar_pci->pci_soft_reset) | |
6e4202c3 | 2340 | return -ENOTSUPP; |
0fc7e270 VT |
2341 | |
2342 | return ar_pci->pci_soft_reset(ar); | |
6e4202c3 VT |
2343 | } |
2344 | ||
/* Reset a QCA988x. Warm reset is preferred; it is attempted several
 * times and verified with a diag read before falling back to cold reset
 * — unless the reset_mode module parameter forbids cold resets.
 */
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}
2418 | ||
/* Reset a QCA6174: cold reset, wait for the target, then a warm reset
 * on top (see FIXME below). Returns 0 or the first step's error.
 */
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}
2450 | ||
6e4202c3 VT |
/* Reset a QCA99X0: cold reset followed by waiting for target init. */
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}
2474 | ||
d63955b3 MK |
2475 | static int ath10k_pci_chip_reset(struct ath10k *ar) |
2476 | { | |
0fc7e270 VT |
2477 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2478 | ||
2479 | if (WARN_ON(!ar_pci->pci_hard_reset)) | |
d63955b3 | 2480 | return -ENOTSUPP; |
0fc7e270 VT |
2481 | |
2482 | return ar_pci->pci_hard_reset(ar); | |
d63955b3 MK |
2483 | } |
2484 | ||
/* HIF power-up hook: disable ASPM (saved link_ctl is restored later in
 * hif_start), reset the chip, then bring up copy engines, target config
 * and finally wake the target CPU. Unwinds via goto labels on error.
 */
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	/* Save link control and clear the ASPM bits for the duration of
	 * the bring-up.
	 */
	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		/* A crash during reset gets cleared and dumped for debug. */
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}
2545 | ||
/* HIF power-down hook: intentionally a no-op beyond the debug print. */
void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}
2554 | ||
8cd13cad MK |
2555 | #ifdef CONFIG_PM |
2556 | ||
8cd13cad MK |
/* HIF suspend hook (CONFIG_PM only). Ensures the device is actually
 * asleep before system suspend proceeds. Always returns 0.
 */
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}
2568 | ||
/* HIF resume hook (CONFIG_PM only): wake the target, then re-apply the
 * RETRY_TIMEOUT quirk that the suspend/resume cycle wiped out.
 *
 * Returns 0 on success or a negative errno if the target could not be
 * woken up.
 */
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}
2593 | #endif | |
2594 | ||
6847f967 SE |
2595 | static bool ath10k_pci_validate_cal(void *data, size_t size) |
2596 | { | |
2597 | __le16 *cal_words = data; | |
2598 | u16 checksum = 0; | |
2599 | size_t i; | |
2600 | ||
2601 | if (size % 2 != 0) | |
2602 | return false; | |
2603 | ||
2604 | for (i = 0; i < size / 2; i++) | |
2605 | checksum ^= le16_to_cpu(cal_words[i]); | |
2606 | ||
2607 | return checksum == 0xffff; | |
2608 | } | |
2609 | ||
/* Prepare the QCA9887 for EEPROM access over I2C: enable the SI clock,
 * route the SDA and SI_CLK GPIOs to the serial interface and program the
 * SI core for I2C mode.
 */
static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
	/* Enable SI clock */
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

	/* Configure GPIOs for I2C operation */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	/* W1TS: write-1-to-set enable bit for the SI clock GPIO */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}
2645 | ||
/* Read a single byte from the device EEPROM at @addr via the SI/I2C
 * engine, busy-polling for completion for at most ~1 second.
 *
 * Returns 0 and stores the byte in @out on success, -ETIMEDOUT if the
 * engine never raises SI_CS_DONE_INT, -EIO if it reports SI_CS_DONE_ERR.
 */
static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

	/* set device select byte and for the read operation */
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

	/* write transmit data, transfer length, and START bit */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

	/* wait max 1 sec (100000 iterations x 10us) */
	wait_limit = 100000;

	/* wait for SI_CS_DONE_INT */
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

	/* clear SI_CS_DONE_INT (write-back of the status register) */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

	/* extract receive data (low byte of RX_DATA0) */
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}
2695 | ||
2696 | static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, | |
2697 | size_t *data_len) | |
2698 | { | |
2699 | u8 *caldata = NULL; | |
2700 | size_t calsize, i; | |
2701 | int ret; | |
2702 | ||
2703 | if (!QCA_REV_9887(ar)) | |
2704 | return -EOPNOTSUPP; | |
2705 | ||
2706 | calsize = ar->hw_params.cal_data_len; | |
2707 | caldata = kmalloc(calsize, GFP_KERNEL); | |
2708 | if (!caldata) | |
2709 | return -ENOMEM; | |
2710 | ||
2711 | ath10k_pci_enable_eeprom(ar); | |
2712 | ||
2713 | for (i = 0; i < calsize; i++) { | |
2714 | ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]); | |
2715 | if (ret) | |
2716 | goto err_free; | |
2717 | } | |
2718 | ||
2719 | if (!ath10k_pci_validate_cal(caldata, calsize)) | |
2720 | goto err_free; | |
2721 | ||
2722 | *data = caldata; | |
2723 | *data_len = calsize; | |
2724 | ||
2725 | return 0; | |
2726 | ||
2727 | err_free: | |
2728 | kfree(data); | |
2729 | ||
2730 | return -EINVAL; | |
2731 | } | |
2732 | ||
/* HIF callbacks backing the PCI bus; the PM hooks are compiled in only
 * when CONFIG_PM is set (matching the #ifdef'd definitions above).
 */
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.diag_read		= ath10k_pci_hif_diag_read,
	.diag_write		= ath10k_pci_diag_write_mem,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.read32			= ath10k_pci_read32,
	.write32		= ath10k_pci_write32,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
	.fetch_cal_eeprom	= ath10k_pci_hif_fetch_cal_eeprom,
};
2754 | ||
5e3dd157 KV |
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Bail out if the device fell off the bus (reads return all-ones) */
	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
		/* Shared line: make sure the interrupt is actually ours */
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		/* Masked here; re-enabled by ath10k_pci_tasklet() */
		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	/* Defer the real work to the bottom half */
	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
2786 | ||
/* Bottom half for ath10k_pci_interrupt_handler(): dump state and bail if
 * the firmware crashed, otherwise service all copy engines and restore
 * the legacy irq masked by the top half.
 */
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
		ath10k_pci_enable_legacy_irq(ar);
}
2805 | ||
/* Install the top-level interrupt handler for MSI delivery.
 * Returns 0 on success or the negative errno from request_irq().
 */
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
2822 | ||
/* Install the top-level interrupt handler on the (possibly shared)
 * legacy INTx line. Returns 0 on success or the negative errno from
 * request_irq().
 */
static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
2839 | ||
fc15ca13 MK |
2840 | static int ath10k_pci_request_irq(struct ath10k *ar) |
2841 | { | |
2842 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
5e3dd157 | 2843 | |
cfe9011a RM |
2844 | switch (ar_pci->oper_irq_mode) { |
2845 | case ATH10K_PCI_IRQ_LEGACY: | |
fc15ca13 | 2846 | return ath10k_pci_request_irq_legacy(ar); |
cfe9011a | 2847 | case ATH10K_PCI_IRQ_MSI: |
fc15ca13 | 2848 | return ath10k_pci_request_irq_msi(ar); |
b8402d82 | 2849 | default: |
cfe9011a | 2850 | return -EINVAL; |
fc15ca13 | 2851 | } |
5e3dd157 KV |
2852 | } |
2853 | ||
fc15ca13 MK |
/* Release the irq line; both legacy and MSI modes use the same handler
 * registered on pdev->irq, so a single free_irq() covers both.
 */
static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	free_irq(ar_pci->pdev->irq, ar);
}
2860 | ||
/* Initialise the interrupt bottom-half tasklet with the ar pointer as
 * its context.
 */
void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
}
2867 | ||
/* Select the interrupt delivery mode: try MSI unless the module parameter
 * forces legacy, otherwise fall back to legacy INTx. The chosen mode is
 * recorded in ar_pci->oper_irq_mode. Always returns 0.
 */
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
2904 | ||
/* Mask all firmware and copy-engine interrupt sources enabled by
 * ath10k_pci_init_irq() for legacy mode.
 */
static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}
2910 | ||
/* Undo ath10k_pci_init_irq() for the active mode: mask legacy interrupt
 * sources, or disable MSI (the default case covers ATH10K_PCI_IRQ_MSI).
 * Always returns 0.
 */
static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		ath10k_pci_deinit_irq_legacy(ar);
		break;
	default:
		pci_disable_msi(ar_pci->pdev);
		break;
	}

	return 0;
}
2926 | ||
/* Poll FW_INDICATOR_ADDRESS until the firmware reports FW_IND_INITIALIZED,
 * for at most ATH10K_PCI_TARGET_WAIT ms.
 *
 * Returns 0 when the target initialised, -EIO if reads return all-ones
 * (device gone from the bus), -ECOMM if the firmware crashed during init
 * (FW_IND_EVENT_PENDING), -ETIMEDOUT otherwise.
 */
int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		/* NOTE(review): this continue skips the mdelay() below, so
		 * a vanished device is polled without delay until the
		 * timeout expires — presumably intentional; confirm.
		 */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}
2983 | ||
/* Full cold reset: pulse the SOC_GLOBAL_RESET bit with settle delays on
 * either side. Always returns 0.
 */
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing into SOC_GLOBAL_RESET to put device into
	 * reset and pulling out of reset pcie may not be stable
	 * for any immediate pcie register access and cause bus error,
	 * add delay before any pcie access request to fix this issue.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
3018 | ||
/* Enable the PCI device, claim BAR0, enforce 32-bit DMA and iomap the
 * register space. Paired with ath10k_pci_release(); unwinds fully on
 * failure via goto cleanup.
 */
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}
3079 | ||
/* Reverse of ath10k_pci_claim(): unmap BAR0, release the region and
 * disable the PCI device.
 */
static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}
3090 | ||
7505f7c3 MK |
3091 | static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) |
3092 | { | |
3093 | const struct ath10k_pci_supp_chip *supp_chip; | |
3094 | int i; | |
3095 | u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); | |
3096 | ||
3097 | for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { | |
3098 | supp_chip = &ath10k_pci_supp_chips[i]; | |
3099 | ||
3100 | if (supp_chip->dev_id == dev_id && | |
3101 | supp_chip->rev_id == rev_id) | |
3102 | return true; | |
3103 | } | |
3104 | ||
3105 | return false; | |
3106 | } | |
3107 | ||
90188f80 RM |
/* Initialise software resources shared by the PCI and AHB paths: locks,
 * the rx-replenish retry timer and the copy-engine pipes (with QCA6174
 * CE config override applied first). Returns 0 or a negative errno.
 */
int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_init(&ar_pci->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);

	if (QCA_REV_6174(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		return ret;
	}

	return 0;
}
3131 | ||
/* Reverse of ath10k_pci_setup_resource(): stop the tasklet, deinit the
 * copy engines and free the pipes.
 */
void ath10k_pci_release_resource(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
}
3138 | ||
4ddb3299 RM |
/* Bus accessors used by the shared PCI/AHB core code. */
static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32		= ath10k_bus_pci_read32,
	.write32	= ath10k_bus_pci_write32,
	.get_num_banks	= ath10k_pci_get_num_banks,
};
3144 | ||
2986e3ef MK |
3145 | static int ath10k_pci_probe(struct pci_dev *pdev, |
3146 | const struct pci_device_id *pci_dev) | |
3147 | { | |
3148 | int ret = 0; | |
3149 | struct ath10k *ar; | |
3150 | struct ath10k_pci *ar_pci; | |
d63955b3 | 3151 | enum ath10k_hw_rev hw_rev; |
2986e3ef | 3152 | u32 chip_id; |
1aaf8efb | 3153 | bool pci_ps; |
0fc7e270 VT |
3154 | int (*pci_soft_reset)(struct ath10k *ar); |
3155 | int (*pci_hard_reset)(struct ath10k *ar); | |
2986e3ef | 3156 | |
d63955b3 MK |
3157 | switch (pci_dev->device) { |
3158 | case QCA988X_2_0_DEVICE_ID: | |
3159 | hw_rev = ATH10K_HW_QCA988X; | |
1aaf8efb | 3160 | pci_ps = false; |
0fc7e270 VT |
3161 | pci_soft_reset = ath10k_pci_warm_reset; |
3162 | pci_hard_reset = ath10k_pci_qca988x_chip_reset; | |
d63955b3 | 3163 | break; |
6fd3dd71 | 3164 | case QCA9887_1_0_DEVICE_ID: |
6fd3dd71 SE |
3165 | hw_rev = ATH10K_HW_QCA9887; |
3166 | pci_ps = false; | |
3167 | pci_soft_reset = ath10k_pci_warm_reset; | |
3168 | pci_hard_reset = ath10k_pci_qca988x_chip_reset; | |
3169 | break; | |
36582e5d | 3170 | case QCA6164_2_1_DEVICE_ID: |
d63955b3 MK |
3171 | case QCA6174_2_1_DEVICE_ID: |
3172 | hw_rev = ATH10K_HW_QCA6174; | |
1aaf8efb | 3173 | pci_ps = true; |
0fc7e270 VT |
3174 | pci_soft_reset = ath10k_pci_warm_reset; |
3175 | pci_hard_reset = ath10k_pci_qca6174_chip_reset; | |
d63955b3 | 3176 | break; |
8bd47021 VT |
3177 | case QCA99X0_2_0_DEVICE_ID: |
3178 | hw_rev = ATH10K_HW_QCA99X0; | |
1aaf8efb | 3179 | pci_ps = false; |
0fc7e270 VT |
3180 | pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; |
3181 | pci_hard_reset = ath10k_pci_qca99x0_chip_reset; | |
8bd47021 | 3182 | break; |
651b4cdc VT |
3183 | case QCA9984_1_0_DEVICE_ID: |
3184 | hw_rev = ATH10K_HW_QCA9984; | |
3185 | pci_ps = false; | |
3186 | pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; | |
3187 | pci_hard_reset = ath10k_pci_qca99x0_chip_reset; | |
3188 | break; | |
e565c312 AK |
3189 | case QCA9888_2_0_DEVICE_ID: |
3190 | hw_rev = ATH10K_HW_QCA9888; | |
3191 | pci_ps = false; | |
3192 | pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; | |
3193 | pci_hard_reset = ath10k_pci_qca99x0_chip_reset; | |
3194 | break; | |
a226b519 BM |
3195 | case QCA9377_1_0_DEVICE_ID: |
3196 | hw_rev = ATH10K_HW_QCA9377; | |
3197 | pci_ps = true; | |
0fc7e270 VT |
3198 | pci_soft_reset = NULL; |
3199 | pci_hard_reset = ath10k_pci_qca6174_chip_reset; | |
a226b519 | 3200 | break; |
d63955b3 MK |
3201 | default: |
3202 | WARN_ON(1); | |
3203 | return -ENOTSUPP; | |
3204 | } | |
3205 | ||
3206 | ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, | |
3207 | hw_rev, &ath10k_pci_hif_ops); | |
2986e3ef | 3208 | if (!ar) { |
7aa7a72a | 3209 | dev_err(&pdev->dev, "failed to allocate core\n"); |
2986e3ef MK |
3210 | return -ENOMEM; |
3211 | } | |
3212 | ||
0a51b343 MP |
3213 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", |
3214 | pdev->vendor, pdev->device, | |
3215 | pdev->subsystem_vendor, pdev->subsystem_device); | |
7aa7a72a | 3216 | |
2986e3ef MK |
3217 | ar_pci = ath10k_pci_priv(ar); |
3218 | ar_pci->pdev = pdev; | |
3219 | ar_pci->dev = &pdev->dev; | |
3220 | ar_pci->ar = ar; | |
36582e5d | 3221 | ar->dev_id = pci_dev->device; |
1aaf8efb | 3222 | ar_pci->pci_ps = pci_ps; |
4ddb3299 | 3223 | ar_pci->bus_ops = &ath10k_pci_bus_ops; |
0fc7e270 VT |
3224 | ar_pci->pci_soft_reset = pci_soft_reset; |
3225 | ar_pci->pci_hard_reset = pci_hard_reset; | |
5e3dd157 | 3226 | |
0a51b343 MP |
3227 | ar->id.vendor = pdev->vendor; |
3228 | ar->id.device = pdev->device; | |
3229 | ar->id.subsystem_vendor = pdev->subsystem_vendor; | |
3230 | ar->id.subsystem_device = pdev->subsystem_device; | |
de57e2c8 | 3231 | |
77258d40 MK |
3232 | setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, |
3233 | (unsigned long)ar); | |
5e3dd157 | 3234 | |
90188f80 | 3235 | ret = ath10k_pci_setup_resource(ar); |
e01ae68c | 3236 | if (ret) { |
90188f80 | 3237 | ath10k_err(ar, "failed to setup resource: %d\n", ret); |
2986e3ef | 3238 | goto err_core_destroy; |
e01ae68c KV |
3239 | } |
3240 | ||
90188f80 | 3241 | ret = ath10k_pci_claim(ar); |
25d0dbcb | 3242 | if (ret) { |
90188f80 RM |
3243 | ath10k_err(ar, "failed to claim device: %d\n", ret); |
3244 | goto err_free_pipes; | |
25d0dbcb MK |
3245 | } |
3246 | ||
d9d6a5ae RM |
3247 | ret = ath10k_pci_force_wake(ar); |
3248 | if (ret) { | |
3249 | ath10k_warn(ar, "failed to wake up device : %d\n", ret); | |
90188f80 | 3250 | goto err_sleep; |
1aaf8efb AK |
3251 | } |
3252 | ||
aa538aca RM |
3253 | ath10k_pci_ce_deinit(ar); |
3254 | ath10k_pci_irq_disable(ar); | |
3255 | ||
403d627b | 3256 | ret = ath10k_pci_init_irq(ar); |
5e3dd157 | 3257 | if (ret) { |
7aa7a72a | 3258 | ath10k_err(ar, "failed to init irqs: %d\n", ret); |
90188f80 | 3259 | goto err_sleep; |
5e3dd157 KV |
3260 | } |
3261 | ||
cfe9011a RM |
3262 | ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n", |
3263 | ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, | |
403d627b MK |
3264 | ath10k_pci_irq_mode, ath10k_pci_reset_mode); |
3265 | ||
5c771e74 MK |
3266 | ret = ath10k_pci_request_irq(ar); |
3267 | if (ret) { | |
7aa7a72a | 3268 | ath10k_warn(ar, "failed to request irqs: %d\n", ret); |
5c771e74 MK |
3269 | goto err_deinit_irq; |
3270 | } | |
3271 | ||
1a7fecb7 MK |
3272 | ret = ath10k_pci_chip_reset(ar); |
3273 | if (ret) { | |
3274 | ath10k_err(ar, "failed to reset chip: %d\n", ret); | |
3275 | goto err_free_irq; | |
3276 | } | |
3277 | ||
3278 | chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); | |
3279 | if (chip_id == 0xffffffff) { | |
3280 | ath10k_err(ar, "failed to get chip id\n"); | |
3281 | goto err_free_irq; | |
3282 | } | |
3283 | ||
3284 | if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { | |
3285 | ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", | |
3286 | pdev->device, chip_id); | |
d9585a92 | 3287 | goto err_free_irq; |
1a7fecb7 MK |
3288 | } |
3289 | ||
e01ae68c | 3290 | ret = ath10k_core_register(ar, chip_id); |
5e3dd157 | 3291 | if (ret) { |
7aa7a72a | 3292 | ath10k_err(ar, "failed to register driver core: %d\n", ret); |
5c771e74 | 3293 | goto err_free_irq; |
5e3dd157 KV |
3294 | } |
3295 | ||
3296 | return 0; | |
3297 | ||
5c771e74 MK |
3298 | err_free_irq: |
3299 | ath10k_pci_free_irq(ar); | |
21396271 | 3300 | ath10k_pci_kill_tasklet(ar); |
5c771e74 | 3301 | |
403d627b MK |
3302 | err_deinit_irq: |
3303 | ath10k_pci_deinit_irq(ar); | |
3304 | ||
c0c378f9 | 3305 | err_sleep: |
0bcbbe67 | 3306 | ath10k_pci_sleep_sync(ar); |
2986e3ef MK |
3307 | ath10k_pci_release(ar); |
3308 | ||
90188f80 RM |
3309 | err_free_pipes: |
3310 | ath10k_pci_free_pipes(ar); | |
3311 | ||
e7b54194 | 3312 | err_core_destroy: |
5e3dd157 | 3313 | ath10k_core_destroy(ar); |
5e3dd157 KV |
3314 | |
3315 | return ret; | |
3316 | } | |
3317 | ||
3318 | static void ath10k_pci_remove(struct pci_dev *pdev) | |
3319 | { | |
3320 | struct ath10k *ar = pci_get_drvdata(pdev); | |
3321 | struct ath10k_pci *ar_pci; | |
3322 | ||
7aa7a72a | 3323 | ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); |
5e3dd157 KV |
3324 | |
3325 | if (!ar) | |
3326 | return; | |
3327 | ||
3328 | ar_pci = ath10k_pci_priv(ar); | |
3329 | ||
3330 | if (!ar_pci) | |
3331 | return; | |
3332 | ||
5e3dd157 | 3333 | ath10k_core_unregister(ar); |
5c771e74 | 3334 | ath10k_pci_free_irq(ar); |
403d627b | 3335 | ath10k_pci_deinit_irq(ar); |
90188f80 | 3336 | ath10k_pci_release_resource(ar); |
77258d40 | 3337 | ath10k_pci_sleep_sync(ar); |
2986e3ef | 3338 | ath10k_pci_release(ar); |
5e3dd157 | 3339 | ath10k_core_destroy(ar); |
5e3dd157 KV |
3340 | } |
3341 | ||
5e3dd157 KV |
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

/* PCI driver glue; matched against ath10k_pci_id_table at enumeration. */
static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};
3350 | ||
3351 | static int __init ath10k_pci_init(void) | |
3352 | { | |
3353 | int ret; | |
3354 | ||
3355 | ret = pci_register_driver(&ath10k_pci_driver); | |
3356 | if (ret) | |
7aa7a72a MK |
3357 | printk(KERN_ERR "failed to register ath10k pci driver: %d\n", |
3358 | ret); | |
5e3dd157 | 3359 | |
0b523ced RM |
3360 | ret = ath10k_ahb_init(); |
3361 | if (ret) | |
3362 | printk(KERN_ERR "ahb init failed: %d\n", ret); | |
3363 | ||
5e3dd157 KV |
3364 | return ret; |
3365 | } | |
3366 | module_init(ath10k_pci_init); | |
3367 | ||
/* Module exit: tear down both registrations made in ath10k_pci_init(). */
static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}
3373 | ||
3374 | module_exit(ath10k_pci_exit); | |
3375 | ||
3376 | MODULE_AUTHOR("Qualcomm Atheros"); | |
b855de0f | 3377 | MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); |
5e3dd157 | 3378 | MODULE_LICENSE("Dual BSD/GPL"); |
5c427f5c BM |
3379 | |
3380 | /* QCA988x 2.0 firmware files */ | |
8026cae7 BM |
3381 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); |
3382 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); | |
5c427f5c | 3383 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); |
53513c30 | 3384 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); |
5e3dd157 | 3385 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); |
0a51b343 | 3386 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); |
5c427f5c | 3387 | |
6fd3dd71 SE |
3388 | /* QCA9887 1.0 firmware files */ |
3389 | MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); | |
3390 | MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE); | |
3391 | MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); | |
3392 | ||
5c427f5c BM |
3393 | /* QCA6174 2.1 firmware files */ |
3394 | MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); | |
e451c1db | 3395 | MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); |
5c427f5c | 3396 | MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); |
0a51b343 | 3397 | MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); |
5c427f5c BM |
3398 | |
3399 | /* QCA6174 3.1 firmware files */ | |
3400 | MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); | |
e451c1db | 3401 | MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); |
5c427f5c | 3402 | MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); |
0a51b343 | 3403 | MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); |
a226b519 BM |
3404 | |
3405 | /* QCA9377 1.0 firmware files */ | |
3406 | MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); | |
3407 | MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE); |