ath10k: implement p2p bcn ie command
drivers/net/wireless/ath/ath10k/wmi-ops.h
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

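/*
 * Per-firmware WMI operation table.
 *
 * ath10k firmware revisions speak slightly different WMI ABIs, so the core
 * driver never builds or parses raw WMI buffers itself.  Each WMI
 * implementation fills in this table instead: the pull_*() ops parse received
 * WMI events into ABI-independent *_ev_arg structures, and the gen_*() ops
 * build ready-to-send command skbs.  Ops a given implementation does not
 * support are left NULL and the wrappers below report -EOPNOTSUPP for them.
 */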
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_pdev_set_wmm_params_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
					     enum wmi_stats_id stats_id);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

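/*
 * The inline wrappers below share a common shape: verify that the active WMI
 * implementation provides the op (otherwise return -EOPNOTSUPP), have the
 * gen_*() op build the command buffer, and submit it with the command id
 * taken from the implementation's ar->wmi.cmd map.
 */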
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
		       struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

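/*
 * Note that the two spectral scan wrappers below, unlike the rest, do not
 * check whether the op is implemented; presumably they are only reached on
 * firmware that supports spectral scanning.
 */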
static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

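/*
 * Beacons are submitted with ath10k_wmi_cmd_send_nowait(), presumably because
 * this runs from the beacon tx path where waiting for command credits is not
 * an option.  On failure the generated skb is freed here, which implies the
 * no-wait send leaves buffer ownership with the caller on error.
 */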
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(arvif);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_pdev_set_wmm_params_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

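/*
 * Illustrative only (a sketch, not part of this header): a beacon-setup path
 * for a P2P GO would typically locate the WFA P2P vendor IE in the beacon
 * template and hand it to the wrapper above, roughly:
 *
 *	const u8 *p2p_ie;
 *
 *	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
 *					 ies, ies_len);
 *	if (p2p_ie)
 *		ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
 *
 * "ies"/"ies_len" are placeholders for the beacon's tail IEs; the command
 * presumably lets the target keep the P2P IE in the beacons it transmits.
 */
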
#endif