/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include "wil6210.h"
#include "txrx.h"
#include "wmi.h"
#include "trace.h"

/**
 * WMI event receiving - theory of operations
 *
 * When the firmware is about to report a WMI event, it fills a memory area
 * in the mailbox and raises the misc. IRQ. The threaded interrupt handler is
 * invoked for the misc IRQ, and @wmi_recv_cmd is called from that thread.
 *
 * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
 * the event list @wil->pending_wmi_ev. Then the work queue @wil->wmi_wq wakes
 * up and handles events within @wmi_event_worker. Every event gets detached
 * from the list, processed and deleted.
 *
 * The purpose of this mechanism is to release the IRQ thread; otherwise,
 * if WMI event handling involves another WMI command flow, that second flow
 * would never complete because the IRQ thread is blocked.
 */

/**
 * Addressing - theory of operations
 *
 * There are several buses present on the WIL6210 card.
 * The same memory areas are visible at different addresses on
 * the different buses. There are 3 main bus masters:
 * - MAC CPU (ucode)
 * - User CPU (firmware)
 * - AHB (host)
 *
 * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
 * AHB addresses starting from 0x880000.
 *
 * Internally, the firmware uses addresses that allow faster access but
 * are invisible from the host. To read from these addresses, an alternative
 * AHB address must be used.
 *
 * Memory mapping
 * Linker address         PCI/Host address
 *                        0x880000 .. 0xa80000  2Mb BAR0
 * 0x800000 .. 0x807000   0x900000 .. 0x907000  28k DCCM
 * 0x840000 .. 0x857000   0x908000 .. 0x91f000  92k PERIPH
 */

/**
 * @fw_mapping provides the memory remapping table
 */
static const struct {
	u32 from; /* linker address - from, inclusive */
	u32 to;   /* linker address - to, exclusive */
	u32 host; /* PCI/Host address - BAR0 + 0x880000 */
} fw_mapping[] = {
	{0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
	{0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
	{0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
	{0x880000, 0x88a000, 0x880000}, /* various RGF */
	{0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */
	/*
	 * 920000..930000 ucode code RAM
	 * 930000..932000 ucode data RAM
	 * 932000..949000 back-door debug data
	 */
};

/**
 * Return the AHB address for a given firmware/ucode internal (linker) address
 * @x - internal address
 * If the address has no valid AHB mapping, return 0
 */
static u32 wmi_addr_remap(u32 x)
{
	uint i;

	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
		if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
			return x + fw_mapping[i].host - fw_mapping[i].from;
	}

	return 0;
}
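
/*
 * Worked example of the remapping above: firmware data RAM address 0x800100
 * matches the {0x800000, 0x808000, 0x900000} entry of @fw_mapping, so
 * wmi_addr_remap(0x800100) = 0x800100 + 0x900000 - 0x800000 = 0x900100,
 * i.e. the AHB alias visible through BAR0. An address that falls outside
 * every entry, e.g. 0x700000, yields 0 (no valid AHB mapping).
 */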

/**
 * Check address validity for a WMI buffer; remap if needed
 * @ptr - internal (linker) fw/ucode address
 *
 * A valid buffer must be DWORD aligned.
 *
 * Return the address for accessing the buffer from the host;
 * if the buffer is not valid, return NULL.
 */
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
{
	u32 off;
	u32 ptr = le32_to_cpu(ptr_);

	if (ptr % 4)
		return NULL;

	ptr = wmi_addr_remap(ptr);
	if (ptr < WIL6210_FW_HOST_OFF)
		return NULL;

	off = HOSTADDR(ptr);
	if (off > WIL6210_MEM_SIZE - 4)
		return NULL;

	return wil->csr + off;
}
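
/*
 * Minimal usage sketch for wmi_buffer() (see wmi_read_hdr() below for the
 * real user): translate the firmware pointer first, and only touch the
 * mapped window if the translation succeeded:
 *
 *	void __iomem *src = wmi_buffer(wil, ptr);
 *
 *	if (!src)
 *		return -EINVAL;
 *	wil_memcpy_fromio_32(&hdr, src, sizeof(hdr));
 */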

/**
 * Check address validity
 */
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
{
	u32 off;

	if (ptr % 4)
		return NULL;

	if (ptr < WIL6210_FW_HOST_OFF)
		return NULL;

	off = HOSTADDR(ptr);
	if (off > WIL6210_MEM_SIZE - 4)
		return NULL;

	return wil->csr + off;
}

int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
		 struct wil6210_mbox_hdr *hdr)
{
	void __iomem *src = wmi_buffer(wil, ptr);

	if (!src)
		return -EINVAL;

	wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));

	return 0;
}

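/*
 * __wmi_send() - Tx mailbox handshake, summarized:
 * 1. validate the command size against the ring entry and check FW readiness;
 * 2. poll the head descriptor's @sync word until the FW releases it;
 * 3. make sure advancing the head would not collide with the FW-owned tail
 *    (ring full);
 * 4. copy the mailbox header plus WMI payload into the descriptor's buffer;
 * 5. set @sync to 1 (entry full), publish the new head and raise SW_INT_MBOX
 *    so the FW picks the command up.
 * Callers must hold @wil->wmi_mutex, as wmi_send() and wmi_call() do.
 */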
static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
{
	struct {
		struct wil6210_mbox_hdr hdr;
		struct wil6210_mbox_hdr_wmi wmi;
	} __packed cmd = {
		.hdr = {
			.type = WIL_MBOX_HDR_TYPE_WMI,
			.flags = 0,
			.len = cpu_to_le16(sizeof(cmd.wmi) + len),
		},
		.wmi = {
			.mid = 0,
			.id = cpu_to_le16(cmdid),
		},
	};
	struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
	struct wil6210_mbox_ring_desc d_head;
	u32 next_head;
	void __iomem *dst;
	void __iomem *head = wmi_addr(wil, r->head);
	uint retry;

	if (sizeof(cmd) + len > r->entry_size) {
		wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
			(int)(sizeof(cmd) + len), r->entry_size);
		return -ERANGE;
	}

	might_sleep();

	if (!test_bit(wil_status_fwready, &wil->status)) {
		wil_err(wil, "WMI: cannot send command while FW not ready\n");
		return -EAGAIN;
	}

	if (!head) {
		wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
		return -EINVAL;
	}
	/* read Tx head until it is not busy */
	for (retry = 5; retry > 0; retry--) {
		wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
		if (d_head.sync == 0)
			break;
		msleep(20);
	}
	if (d_head.sync != 0) {
		wil_err(wil, "WMI head busy\n");
		return -EBUSY;
	}
	/* next head */
	next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
	wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
	/* wait till FW finishes with the previous command */
	for (retry = 5; retry > 0; retry--) {
		r->tail = ioread32(wil->csr + HOST_MBOX +
				   offsetof(struct wil6210_mbox_ctl, tx.tail));
		if (next_head != r->tail)
			break;
		msleep(20);
	}
	if (next_head == r->tail) {
		wil_err(wil, "WMI ring full\n");
		return -EBUSY;
	}
	dst = wmi_buffer(wil, d_head.addr);
	if (!dst) {
		wil_err(wil, "invalid WMI buffer: 0x%08x\n",
			le32_to_cpu(d_head.addr));
		return -EINVAL;
	}
	cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
	/* set command */
	wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
	wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
			 sizeof(cmd), true);
	wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
			 len, true);
	wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
	wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
	/* mark entry as full */
	iowrite32(1, wil->csr + HOSTADDR(r->head) +
		  offsetof(struct wil6210_mbox_ring_desc, sync));
	/* advance next ptr */
	iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
		  offsetof(struct wil6210_mbox_ctl, tx.head));

	trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);

	/* interrupt to FW */
	iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);

	return 0;
}

int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
{
	int rc;

	mutex_lock(&wil->wmi_mutex);
	rc = __wmi_send(wil, cmdid, buf, len);
	mutex_unlock(&wil->wmi_mutex);

	return rc;
}

/*=== Event handlers ===*/
static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct wireless_dev *wdev = wil->wdev;
	struct wmi_ready_event *evt = d;

	wil->fw_version = le32_to_cpu(evt->sw_version);
	wil->n_mids = evt->numof_additional_mids;

	wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
		 evt->mac, wil->n_mids);

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
		memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
	}
	snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
		 "%d", wil->fw_version);
}

static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
			     int len)
{
	wil_dbg_wmi(wil, "WMI: got FW ready event\n");

	set_bit(wil_status_fwready, &wil->status);
	/* reuse wmi_ready for the firmware ready indication */
	complete(&wil->wmi_ready);
}

static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
{
	struct wmi_rx_mgmt_packet_event *data = d;
	struct wiphy *wiphy = wil_to_wiphy(wil);
	struct ieee80211_mgmt *rx_mgmt_frame =
		(struct ieee80211_mgmt *)data->payload;
	int ch_no = data->info.channel + 1;
	u32 freq = ieee80211_channel_to_frequency(ch_no,
						  IEEE80211_BAND_60GHZ);
	struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
	s32 signal = data->info.sqi;
	__le16 fc = rx_mgmt_frame->frame_control;
	u32 d_len = le32_to_cpu(data->info.len);
	u16 d_status = le16_to_cpu(data->info.status);

	wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
		    data->info.channel, data->info.mcs, data->info.snr,
		    data->info.sqi);
	wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
		    le16_to_cpu(fc));
	wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
		    data->info.qid, data->info.mid, data->info.cid);

	if (!channel) {
		wil_err(wil, "Frame on unsupported channel\n");
		return;
	}

	if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
		struct cfg80211_bss *bss;

		bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
						d_len, signal, GFP_KERNEL);
		if (bss) {
			wil_dbg_wmi(wil, "Added BSS %pM\n",
				    rx_mgmt_frame->bssid);
			cfg80211_put_bss(wiphy, bss);
		} else {
			wil_err(wil, "cfg80211_inform_bss() failed\n");
		}
	} else {
		cfg80211_rx_mgmt(wil->wdev, freq, signal,
				 (void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL);
	}
}

static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
				  void *d, int len)
{
	if (wil->scan_request) {
		struct wmi_scan_complete_event *data = d;
		bool aborted = (data->status != WMI_SCAN_SUCCESS);

		wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
		cfg80211_scan_done(wil->scan_request, aborted);
		wil->scan_request = NULL;
	} else {
		wil_err(wil, "SCAN_COMPLETE while not scanning\n");
	}
}

static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct wireless_dev *wdev = wil->wdev;
	struct wmi_connect_event *evt = d;
	int ch; /* channel number */
	struct station_info sinfo;
	u8 *assoc_req_ie, *assoc_resp_ie;
	size_t assoc_req_ielen, assoc_resp_ielen;
	/* capinfo(u16) + listen_interval(u16) + IEs */
	const size_t assoc_req_ie_offset = sizeof(u16) * 2;
	/* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
	const size_t assoc_resp_ie_offset = sizeof(u16) * 3;

	if (len < sizeof(*evt)) {
		wil_err(wil, "Connect event too short : %d bytes\n", len);
		return;
	}
	if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
		   evt->assoc_resp_len) {
		wil_err(wil,
			"Connect event corrupted : %d != %d + %d + %d + %d\n",
			len, (int)sizeof(*evt), evt->beacon_ie_len,
			evt->assoc_req_len, evt->assoc_resp_len);
		return;
	}
	if (evt->cid >= WIL6210_MAX_CID) {
		wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
		return;
	}

	ch = evt->channel + 1;
	wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
		    evt->bssid, ch, evt->cid);
	wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
			 evt->assoc_info, len - sizeof(*evt), true);

	/* figure out IEs */
	assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
					assoc_req_ie_offset];
	assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
	if (evt->assoc_req_len <= assoc_req_ie_offset) {
		assoc_req_ie = NULL;
		assoc_req_ielen = 0;
	}

	assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
					 evt->assoc_req_len +
					 assoc_resp_ie_offset];
	assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
	if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
		assoc_resp_ie = NULL;
		assoc_resp_ielen = 0;
	}

	if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
	    (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
		if (!test_bit(wil_status_fwconnecting, &wil->status)) {
			wil_err(wil, "Not in connecting state\n");
			return;
		}
		del_timer_sync(&wil->connect_timer);
		cfg80211_connect_result(ndev, evt->bssid,
					assoc_req_ie, assoc_req_ielen,
					assoc_resp_ie, assoc_resp_ielen,
					WLAN_STATUS_SUCCESS, GFP_KERNEL);

	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
		   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
		memset(&sinfo, 0, sizeof(sinfo));

		sinfo.generation = wil->sinfo_gen++;

		if (assoc_req_ie) {
			sinfo.assoc_req_ies = assoc_req_ie;
			sinfo.assoc_req_ies_len = assoc_req_ielen;
			sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
		}

		cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
	}
	clear_bit(wil_status_fwconnecting, &wil->status);
	set_bit(wil_status_fwconnected, &wil->status);

	/* FIXME FW can transmit only ucast frames to peer */
	/* FIXME real ring_id instead of hard coded 0 */
	memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
	wil->sta[evt->cid].status = wil_sta_conn_pending;

	wil->pending_connect_cid = evt->cid;
	queue_work(wil->wmi_wq_conn, &wil->connect_worker);
}

static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
			       void *d, int len)
{
	struct wmi_disconnect_event *evt = d;

	wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
		    evt->bssid,
		    evt->protocol_reason_status, evt->disconnect_reason);

	wil->sinfo_gen++;

	mutex_lock(&wil->mutex);
	wil6210_disconnect(wil, evt->bssid);
	mutex_unlock(&wil->mutex);
}

static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
{
	struct wmi_notify_req_done_event *evt = d;

	if (len < sizeof(*evt)) {
		wil_err(wil, "Short NOTIFY event\n");
		return;
	}

	wil->stats.tsf = le64_to_cpu(evt->tsf);
	wil->stats.snr = le32_to_cpu(evt->snr_val);
	wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
	wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
	wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
	wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
	wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
	wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
		    "BF status 0x%08x SNR 0x%08x SQI %d%%\n"
		    "Tx Tpt %d goodput %d Rx goodput %d\n"
		    "Sectors(rx:tx) my %d:%d peer %d:%d\n",
		    wil->stats.bf_mcs, wil->stats.tsf, evt->status,
		    wil->stats.snr, evt->sqi, le32_to_cpu(evt->tx_tpt),
		    le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
		    wil->stats.my_rx_sector, wil->stats.my_tx_sector,
		    wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
}

/*
 * The firmware reports an EAPOL frame using a WMI event.
 * Reconstruct the Ethernet frame and deliver it via the normal Rx path.
 */
static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
			     void *d, int len)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct wmi_eapol_rx_event *evt = d;
	u16 eapol_len = le16_to_cpu(evt->eapol_len);
	int sz = eapol_len + ETH_HLEN;
	struct sk_buff *skb;
	struct ethhdr *eth;
	int cid;
	struct wil_net_stats *stats = NULL;

	wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
		    evt->src_mac);

	cid = wil_find_cid(wil, evt->src_mac);
	if (cid >= 0)
		stats = &wil->sta[cid].stats;

	if (eapol_len > 196) { /* TODO: revisit size limit */
		wil_err(wil, "EAPOL too large\n");
		return;
	}

	skb = alloc_skb(sz, GFP_KERNEL);
	if (!skb) {
		wil_err(wil, "Failed to allocate skb\n");
		return;
	}

	eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
	memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
	memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
	eth->h_proto = cpu_to_be16(ETH_P_PAE);
	memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += sz;
		if (stats) {
			stats->rx_packets++;
			stats->rx_bytes += sz;
		}
	} else {
		ndev->stats.rx_dropped++;
		if (stats)
			stats->rx_dropped++;
	}
}

static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct wmi_data_port_open_event *evt = d;
	u8 cid = evt->cid;

	wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);

	if (cid >= ARRAY_SIZE(wil->sta)) {
		wil_err(wil, "Link UP for invalid CID %d\n", cid);
		return;
	}

	wil->sta[cid].data_port_open = true;
	netif_carrier_on(ndev);
}

static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct wmi_wbe_link_down_event *evt = d;
	u8 cid = evt->cid;

	wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
		    cid, le32_to_cpu(evt->reason));

	if (cid >= ARRAY_SIZE(wil->sta)) {
		wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
		return;
	}

	wil->sta[cid].data_port_open = false;
	netif_carrier_off(ndev);
}

static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
			      int len)
{
	struct wmi_vring_ba_status_event *evt = d;
	struct wil_sta_info *sta;
	uint i, cid;

	/* TODO: use Rx BA status, not Tx one */

	wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
		    evt->ringid,
		    evt->status == WMI_BA_AGREED ? "OK" : "N/A",
		    evt->agg_wsize, __le16_to_cpu(evt->ba_timeout));

	if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
		wil_err(wil, "invalid ring id %d\n", evt->ringid);
		return;
	}

	cid = wil->vring2cid_tid[evt->ringid][0];
	if (cid >= WIL6210_MAX_CID) {
		wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid);
		return;
	}

	sta = &wil->sta[cid];
	if (sta->status == wil_sta_unused) {
		wil_err(wil, "CID %d unused\n", cid);
		return;
	}

	wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr);
	for (i = 0; i < WIL_STA_TID_NUM; i++) {
		struct wil_tid_ampdu_rx *r = sta->tid_rx[i];

		sta->tid_rx[i] = NULL;
		wil_tid_ampdu_rx_free(wil, r);
		if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize)
			sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil,
						evt->agg_wsize, 0);
	}
}

static const struct {
	int eventid;
	void (*handler)(struct wil6210_priv *wil, int eventid,
			void *data, int data_len);
} wmi_evt_handlers[] = {
	{WMI_READY_EVENTID, wmi_evt_ready},
	{WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
	{WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
	{WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
	{WMI_CONNECT_EVENTID, wmi_evt_connect},
	{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
	{WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
	{WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
	{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
	{WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
	{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
};

/*
 * Runs in IRQ context.
 * Extracts the next WMI event from the mailbox and queues it to
 * @wil->pending_wmi_ev; it will eventually be handled by @wmi_event_worker
 * in the context of the "wil6210_wmi" thread.
 */
void wmi_recv_cmd(struct wil6210_priv *wil)
{
	struct wil6210_mbox_ring_desc d_tail;
	struct wil6210_mbox_hdr hdr;
	struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
	struct pending_wmi_event *evt;
	u8 *cmd;
	void __iomem *src;
	ulong flags;

	if (!test_bit(wil_status_reset_done, &wil->status)) {
		wil_err(wil, "Reset not completed\n");
		return;
	}

	for (;;) {
		u16 len;

		r->head = ioread32(wil->csr + HOST_MBOX +
				   offsetof(struct wil6210_mbox_ctl, rx.head));
		if (r->tail == r->head)
			return;

		/* read cmd from tail */
		wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
				     sizeof(struct wil6210_mbox_ring_desc));
		if (d_tail.sync == 0) {
			wil_err(wil, "Mbox evt not owned by FW?\n");
			return;
		}

		if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
			wil_err(wil, "Mbox evt at 0x%08x?\n",
				le32_to_cpu(d_tail.addr));
			return;
		}

		len = le16_to_cpu(hdr.len);
		src = wmi_buffer(wil, d_tail.addr) +
		      sizeof(struct wil6210_mbox_hdr);
		evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
					     event.wmi) + len, 4),
			      GFP_KERNEL);
		if (!evt)
			return;

		evt->event.hdr = hdr;
		cmd = (void *)&evt->event.wmi;
		wil_memcpy_fromio_32(cmd, src, len);
		/* mark entry as empty */
		iowrite32(0, wil->csr + HOSTADDR(r->tail) +
			  offsetof(struct wil6210_mbox_ring_desc, sync));
		/* indicate */
		wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
			    le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
			    hdr.flags);
		if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
		    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
			struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
			u16 id = le16_to_cpu(wmi->id);
			u32 tstamp = le32_to_cpu(wmi->timestamp);

			wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
				    id, wmi->mid, tstamp);
			trace_wil6210_wmi_event(wmi, &wmi[1],
						len - sizeof(*wmi));
		}
		wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
				 &evt->event.hdr, sizeof(hdr) + len, true);

		/* advance tail */
		r->tail = r->base + ((r->tail - r->base +
			  sizeof(struct wil6210_mbox_ring_desc)) % r->size);
		iowrite32(r->tail, wil->csr + HOST_MBOX +
			  offsetof(struct wil6210_mbox_ctl, rx.tail));

		/* add to the pending list */
		spin_lock_irqsave(&wil->wmi_ev_lock, flags);
		list_add_tail(&evt->list, &wil->pending_wmi_ev);
		spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
		{
			int q = queue_work(wil->wmi_wq,
					   &wil->wmi_event_worker);
			wil_dbg_wmi(wil, "queue_work -> %d\n", q);
		}
	}
}

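/*
 * wmi_call() - send a WMI command and wait for its reply event.
 * The command @cmdid is sent under @wil->wmi_mutex, then the caller sleeps
 * on @wil->wmi_ready for up to @to_msec milliseconds waiting for the event
 * @reply_id. If @reply is non-NULL, wmi_event_handle() copies up to
 * @reply_size bytes of the matching event, starting at its
 * struct wil6210_mbox_hdr_wmi header, into it before completing the wait.
 */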
int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
	     u16 reply_id, void *reply, u8 reply_size, int to_msec)
{
	int rc;
	int remain;

	mutex_lock(&wil->wmi_mutex);

	rc = __wmi_send(wil, cmdid, buf, len);
	if (rc)
		goto out;

	wil->reply_id = reply_id;
	wil->reply_buf = reply;
	wil->reply_size = reply_size;
	remain = wait_for_completion_timeout(&wil->wmi_ready,
					     msecs_to_jiffies(to_msec));
	if (0 == remain) {
		wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
			cmdid, reply_id, to_msec);
		rc = -ETIME;
	} else {
		wil_dbg_wmi(wil,
			    "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
			    cmdid, reply_id,
			    to_msec - jiffies_to_msecs(remain));
	}
	wil->reply_id = 0;
	wil->reply_buf = NULL;
	wil->reply_size = 0;
out:
	mutex_unlock(&wil->wmi_mutex);

	return rc;
}

int wmi_echo(struct wil6210_priv *wil)
{
	struct wmi_echo_cmd cmd = {
		.value = cpu_to_le32(0x12345678),
	};

	return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
			WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
}

int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
{
	struct wmi_set_mac_address_cmd cmd;

	memcpy(cmd.mac, addr, ETH_ALEN);

	wil_dbg_wmi(wil, "Set MAC %pM\n", addr);

	return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
}

int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
{
	int rc;

	struct wmi_pcp_start_cmd cmd = {
		.bcon_interval = cpu_to_le16(bi),
		.network_type = wmi_nettype,
		.disable_sec_offload = 1,
		.channel = chan - 1,
		.pcp_max_assoc_sta = WIL6210_MAX_CID,
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_pcp_started_event evt;
	} __packed reply;

	if (!wil->secure_pcp)
		cmd.disable_sec = 1;

	/*
	 * Processing time may be long; in the case of a secure AP it takes
	 * about 3500 ms for the FW to start the AP.
	 */
	rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
		      WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
	if (rc)
		return rc;

	if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
		rc = -EINVAL;

	return rc;
}

int wmi_pcp_stop(struct wil6210_priv *wil)
{
	return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
			WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
}

int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
{
	struct wmi_set_ssid_cmd cmd = {
		.ssid_len = cpu_to_le32(ssid_len),
	};

	if (ssid_len > sizeof(cmd.ssid))
		return -EINVAL;

	memcpy(cmd.ssid, ssid, ssid_len);

	return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
}

int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
{
	int rc;
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_set_ssid_cmd cmd;
	} __packed reply;
	int len; /* reply.cmd.ssid_len in CPU order */

	rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
		      &reply, sizeof(reply), 20);
	if (rc)
		return rc;

	len = le32_to_cpu(reply.cmd.ssid_len);
	if (len > sizeof(reply.cmd.ssid))
		return -EINVAL;

	*ssid_len = len;
	memcpy(ssid, reply.cmd.ssid, len);

	return 0;
}

int wmi_set_channel(struct wil6210_priv *wil, int channel)
{
	struct wmi_set_pcp_channel_cmd cmd = {
		.channel = channel - 1,
	};

	return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
}

int wmi_get_channel(struct wil6210_priv *wil, int *channel)
{
	int rc;
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_set_pcp_channel_cmd cmd;
	} __packed reply;

	rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
		      WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
	if (rc)
		return rc;

	if (reply.cmd.channel > 3)
		return -EINVAL;

	*channel = reply.cmd.channel + 1;

	return 0;
}

int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
{
	struct wmi_p2p_cfg_cmd cmd = {
		.discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
		.channel = channel - 1,
	};

	return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
}

int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
		       const void *mac_addr)
{
	struct wmi_delete_cipher_key_cmd cmd = {
		.key_index = key_index,
	};

	if (mac_addr)
		memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);

	return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
}

int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
		       const void *mac_addr, int key_len, const void *key)
{
	struct wmi_add_cipher_key_cmd cmd = {
		.key_index = key_index,
		.key_usage = WMI_KEY_USE_PAIRWISE,
		.key_len = key_len,
	};

	if (!key || (key_len > sizeof(cmd.key)))
		return -EINVAL;

	memcpy(cmd.key, key, key_len);
	if (mac_addr)
		memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);

	return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
}

int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
{
	int rc;
	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);

	if (!cmd)
		return -ENOMEM;

	cmd->mgmt_frm_type = type;
	/* BUG: FW API defines ieLen as u8. Will fix in FW */
	cmd->ie_len = cpu_to_le16(ie_len);
	memcpy(cmd->ie_info, ie, ie_len);
	rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
	kfree(cmd);

	return rc;
}

/**
 * wmi_rxon - turn radio on/off
 * @on: turn on if true, off otherwise
 *
 * Only switches the radio; the channel should be set separately.
 * There is no timeout for rxon: the radio stays on until some other call
 * turns it off.
 */
int wmi_rxon(struct wil6210_priv *wil, bool on)
{
	int rc;
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_listen_started_event evt;
	} __packed reply;

	wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");

	if (on) {
		rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
			      WMI_LISTEN_STARTED_EVENTID,
			      &reply, sizeof(reply), 100);
		if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
			rc = -EINVAL;
	} else {
		rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
			      WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
	}

	return rc;
}

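/*
 * wmi_rx_chain_add() - hand the Rx vring over to the FW.
 * Builds a WMI_CFG_RX_CHAIN command describing @vring (base address, size,
 * max MPDU size), selects sniffer settings in monitor mode or enables
 * TCP/UDP checksum offload otherwise, then waits for the
 * WMI_CFG_RX_CHAIN_DONE event and stores its rx_ring_tail_ptr in
 * @vring->hwtail.
 */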
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
{
	struct wireless_dev *wdev = wil->wdev;
	struct net_device *ndev = wil_to_ndev(wil);
	struct wmi_cfg_rx_chain_cmd cmd = {
		.action = WMI_RX_CHAIN_ADD,
		.rx_sw_ring = {
			.max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
			.ring_mem_base = cpu_to_le64(vring->pa),
			.ring_size = cpu_to_le16(vring->size),
		},
		.mid = 0, /* TODO - what is it? */
		.decap_trans_type = WMI_DECAP_TYPE_802_3,
		.reorder_type = WMI_RX_SW_REORDER,
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_cfg_rx_chain_done_event evt;
	} __packed evt;
	int rc;

	if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
		struct ieee80211_channel *ch = wdev->preset_chandef.chan;

		cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
		if (ch)
			cmd.sniffer_cfg.channel = ch->hw_value - 1;
		cmd.sniffer_cfg.phy_info_mode =
			cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
		cmd.sniffer_cfg.phy_support =
			cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
				    ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
	} else {
		/* Initialize offload (in non-sniffer mode).
		 * The Linux IP stack always calculates the IP checksum;
		 * HW always calculates the TCP/UDP checksum.
		 */
		cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
	}
	/* typical time for secure PCP is 840ms */
	rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
		      WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
	if (rc)
		return rc;

	vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);

	wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
		     le32_to_cpu(evt.evt.status), vring->hwtail);

	if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
		rc = -EINVAL;

	return rc;
}

int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
{
	int rc;
	struct wmi_temp_sense_cmd cmd = {
		.measure_marlon_m_en = cpu_to_le32(!!t_m),
		.measure_marlon_r_en = cpu_to_le32(!!t_r),
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_temp_sense_done_event evt;
	} __packed reply;

	rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd),
		      WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		return rc;

	if (t_m)
		*t_m = le32_to_cpu(reply.evt.marlon_m_t1000);
	if (t_r)
		*t_r = le32_to_cpu(reply.evt.marlon_r_t1000);

	return 0;
}

int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
{
	struct wmi_disconnect_sta_cmd cmd = {
		.disconnect_reason = cpu_to_le16(reason),
	};

	memcpy(cmd.dst_mac, mac, ETH_ALEN);

	wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);

	return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
}

void wmi_event_flush(struct wil6210_priv *wil)
{
	struct pending_wmi_event *evt, *t;

	wil_dbg_wmi(wil, "%s()\n", __func__);

	list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
		list_del(&evt->list);
		kfree(evt);
	}
}

static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
				 void *d, int len)
{
	uint i;

	for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
		if (wmi_evt_handlers[i].eventid == id) {
			wmi_evt_handlers[i].handler(wil, id, d, len);
			return true;
		}
	}

	return false;
}

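/*
 * wmi_event_handle() - dispatch one mailbox event.
 * If somebody is waiting for this WMI event (its id matches @wil->reply_id),
 * either copy it into @wil->reply_buf or, when no reply buffer was supplied,
 * run its handler, then complete @wil->wmi_ready to wake the waiter in
 * wmi_call(). Unsolicited WMI events are dispatched through
 * @wmi_evt_handlers; anything else is reported and hex-dumped as an error.
 */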
static void wmi_event_handle(struct wil6210_priv *wil,
			     struct wil6210_mbox_hdr *hdr)
{
	u16 len = le16_to_cpu(hdr->len);

	if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
	    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
		struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
		void *evt_data = (void *)(&wmi[1]);
		u16 id = le16_to_cpu(wmi->id);

		/* check if someone is waiting for this event */
		if (wil->reply_id && wil->reply_id == id) {
			if (wil->reply_buf) {
				memcpy(wil->reply_buf, wmi,
				       min(len, wil->reply_size));
			} else {
				wmi_evt_call_handler(wil, id, evt_data,
						     len - sizeof(*wmi));
			}
			wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
			complete(&wil->wmi_ready);
			return;
		}
		/* unsolicited event */
		/* search for handler */
		if (!wmi_evt_call_handler(wil, id, evt_data,
					  len - sizeof(*wmi))) {
			wil_err(wil, "Unhandled event 0x%04x\n", id);
		}
	} else {
		wil_err(wil, "Unknown event type\n");
		print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
			       hdr, sizeof(*hdr) + len, true);
	}
}

/*
 * Retrieve next WMI event from the pending list
 */
static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
{
	ulong flags;
	struct list_head *ret = NULL;

	spin_lock_irqsave(&wil->wmi_ev_lock, flags);

	if (!list_empty(&wil->pending_wmi_ev)) {
		ret = wil->pending_wmi_ev.next;
		list_del(ret);
	}

	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);

	return ret;
}

/*
 * Handler for the WMI events
 */
void wmi_event_worker(struct work_struct *work)
{
	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
						wmi_event_worker);
	struct pending_wmi_event *evt;
	struct list_head *lh;

	while ((lh = next_wmi_ev(wil)) != NULL) {
		evt = list_entry(lh, struct pending_wmi_event, list);
		wmi_event_handle(wil, &evt->event.hdr);
		kfree(evt);
	}
}