1 /*
2 * skl-topology.c - Implements Platform component ALSA controls/widget
3 * handlers.
4 *
5 * Copyright (C) 2014-2015 Intel Corp
6 * Author: Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19 #include <linux/slab.h>
20 #include <linux/types.h>
21 #include <linux/firmware.h>
22 #include <sound/soc.h>
23 #include <sound/soc-topology.h>
24 #include <uapi/sound/snd_sst_tokens.h>
25 #include "skl-sst-dsp.h"
26 #include "skl-sst-ipc.h"
27 #include "skl-topology.h"
28 #include "skl.h"
29 #include "skl-tplg-interface.h"
30 #include "../common/sst-dsp.h"
31 #include "../common/sst-dsp-priv.h"
32
33 #define SKL_CH_FIXUP_MASK (1 << 0)
34 #define SKL_RATE_FIXUP_MASK (1 << 1)
35 #define SKL_FMT_FIXUP_MASK (1 << 2)
36 #define SKL_IN_DIR_BIT_MASK BIT(0)
37 #define SKL_PIN_COUNT_MASK GENMASK(7, 4)
38
39 /*
40 * The SKL DSP driver models only a few DAPM widget types and ignores the
41 * rest. This helper checks whether the SKL driver handles a given widget type
42 */
43 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
44 {
45 switch (w->id) {
46 case snd_soc_dapm_dai_link:
47 case snd_soc_dapm_dai_in:
48 case snd_soc_dapm_aif_in:
49 case snd_soc_dapm_aif_out:
50 case snd_soc_dapm_dai_out:
51 case snd_soc_dapm_switch:
52 return false;
53 default:
54 return true;
55 }
56 }
57
58 /*
59 * Each pipeline needs memory to be allocated. Check if we have free
60 * memory available in the pool.
61 */
62 static bool skl_is_pipe_mem_avail(struct skl *skl,
63 struct skl_module_cfg *mconfig)
64 {
65 struct skl_sst *ctx = skl->skl_sst;
66
67 if (skl->resource.mem + mconfig->pipe->memory_pages >
68 skl->resource.max_mem) {
69 dev_err(ctx->dev,
70 "%s: module_id %d instance %d\n", __func__,
71 mconfig->id.module_id,
72 mconfig->id.instance_id);
73 dev_err(ctx->dev,
74 "exceeds ppl memory available %d mem %d\n",
75 skl->resource.max_mem, skl->resource.mem);
76 return false;
77 } else {
78 return true;
79 }
80 }
81
82 /*
83 * Add the mem to the mem pool. This is freed when the pipe is deleted.
84 * Note: the DSP does the actual memory management; we only keep track of
85 * the overall pool usage
86 */
87 static void skl_tplg_alloc_pipe_mem(struct skl *skl,
88 struct skl_module_cfg *mconfig)
89 {
90 skl->resource.mem += mconfig->pipe->memory_pages;
91 }
92
93 /*
94 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
95 * (Million Cycles Per Second) required for the module/pipe
96 *
97 * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
98 * pipe.
99 */
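/*
 * For example (numbers purely illustrative, not from the driver): with
 * max_mcps = 100000 and 60000 already allocated, a pipe whose modules
 * need another 50000 MCPS would be rejected by the check below.
 */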
100
101 static bool skl_is_pipe_mcps_avail(struct skl *skl,
102 struct skl_module_cfg *mconfig)
103 {
104 struct skl_sst *ctx = skl->skl_sst;
105
106 if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
107 dev_err(ctx->dev,
108 "%s: module_id %d instance %d\n", __func__,
109 mconfig->id.module_id, mconfig->id.instance_id);
110 dev_err(ctx->dev,
111 "exceeds ppl mcps available %d > mem %d\n",
112 skl->resource.max_mcps, skl->resource.mcps);
113 return false;
114 } else {
115 return true;
116 }
117 }
118
119 static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
120 struct skl_module_cfg *mconfig)
121 {
122 skl->resource.mcps += mconfig->mcps;
123 }
124
125 /*
126 * Free the mcps when tearing down
127 */
128 static void
129 skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
130 {
131 skl->resource.mcps -= mconfig->mcps;
132 }
133
134 /*
135 * Free the memory when tearing down
136 */
137 static void
138 skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
139 {
140 skl->resource.mem -= mconfig->pipe->memory_pages;
141 }
142
143
144 static void skl_dump_mconfig(struct skl_sst *ctx,
145 struct skl_module_cfg *mcfg)
146 {
147 dev_dbg(ctx->dev, "Dumping config\n");
148 dev_dbg(ctx->dev, "Input Format:\n");
149 dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
150 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
151 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
152 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
153 dev_dbg(ctx->dev, "Output Format:\n");
154 dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
155 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
156 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
157 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
158 }
159
160 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
161 {
162 int slot_map = 0xFFFFFFFF;
163 int start_slot = 0;
164 int i;
165
166 for (i = 0; i < chs; i++) {
167 /*
168 * For 2 channels with starting slot as 0, slot map will
169 * look like 0xFFFFFF10.
170 */
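/*
 * By the same computation, 4 channels starting at slot 0 would
 * give 0xFFFF3210.
 */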
171 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
172 start_slot++;
173 }
174 fmt->ch_map = slot_map;
175 }
176
177 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
178 struct skl_pipe_params *params, int fixup)
179 {
180 if (fixup & SKL_RATE_FIXUP_MASK)
181 fmt->s_freq = params->s_freq;
182 if (fixup & SKL_CH_FIXUP_MASK) {
183 fmt->channels = params->ch;
184 skl_tplg_update_chmap(fmt, fmt->channels);
185 }
186 if (fixup & SKL_FMT_FIXUP_MASK) {
187 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
188
189 /*
190 * 16 bit is a 16 bit container whereas 24 bit is in a 32 bit
191 * container, so update the bit depth accordingly
192 */
193 switch (fmt->valid_bit_depth) {
194 case SKL_DEPTH_16BIT:
195 fmt->bit_depth = fmt->valid_bit_depth;
196 break;
197
198 default:
199 fmt->bit_depth = SKL_DEPTH_32BIT;
200 break;
201 }
202 }
203
204 }
205
206 /*
207 * A pipeline may have modules which impact the pcm parameters, like SRC,
208 * channel converter, format converter.
209 * We need to calculate the output params by applying the 'fixup'.
210 * Topology tells the driver which type of fixup is to be applied by
211 * supplying the fixup mask, so based on that we calculate the output.
212 *
213 * In an FE the pcm hw_params is the source/target format; the same is
214 * applicable for a BE when its hw_params is invoked.
215 * Here, based on FE/BE pipeline and stream direction, we calculate the
216 * input and output fixups and then apply them to the module
217 */
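/*
 * A sketch of the intent (illustrative only): for a playback FE module
 * whose 'converter' mask contains the channel bit, in_fixup keeps the
 * full params_fixup mask while out_fixup has that bit cleared, so the
 * input follows the hw_params channel count and the output keeps the
 * channel count defined by the topology.
 */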
218 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
219 struct skl_pipe_params *params, bool is_fe)
220 {
221 int in_fixup, out_fixup;
222 struct skl_module_fmt *in_fmt, *out_fmt;
223
224 /* Fixups will be applied to pin 0 only */
225 in_fmt = &m_cfg->in_fmt[0];
226 out_fmt = &m_cfg->out_fmt[0];
227
228 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
229 if (is_fe) {
230 in_fixup = m_cfg->params_fixup;
231 out_fixup = (~m_cfg->converter) &
232 m_cfg->params_fixup;
233 } else {
234 out_fixup = m_cfg->params_fixup;
235 in_fixup = (~m_cfg->converter) &
236 m_cfg->params_fixup;
237 }
238 } else {
239 if (is_fe) {
240 out_fixup = m_cfg->params_fixup;
241 in_fixup = (~m_cfg->converter) &
242 m_cfg->params_fixup;
243 } else {
244 in_fixup = m_cfg->params_fixup;
245 out_fixup = (~m_cfg->converter) &
246 m_cfg->params_fixup;
247 }
248 }
249
250 skl_tplg_update_params(in_fmt, params, in_fixup);
251 skl_tplg_update_params(out_fmt, params, out_fixup);
252 }
253
254 /*
255 * A module needs input and output buffers, which depend upon the pcm
256 * params; so once we have calculated the params, we need to calculate
257 * the buffer sizes as well.
258 */
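/*
 * Rough worked example (illustrative numbers only): at 48 kHz, 2 channels
 * and a 16 bit container, in_rate = 48000 / 1000 = 48 and
 * ibs = 48 * 2 * (16 >> 3) = 192 bytes, i.e. about 1 ms of audio;
 * an SRC module multiplies this by 5.
 */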
259 static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
260 struct skl_module_cfg *mcfg)
261 {
262 int multiplier = 1;
263 struct skl_module_fmt *in_fmt, *out_fmt;
264 int in_rate, out_rate;
265
266
267 /* Since fixups are applied to pin 0 only, ibs and obs need to
268 * change for pin 0 only
269 */
270 in_fmt = &mcfg->in_fmt[0];
271 out_fmt = &mcfg->out_fmt[0];
272
273 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
274 multiplier = 5;
275
276 if (in_fmt->s_freq % 1000)
277 in_rate = (in_fmt->s_freq / 1000) + 1;
278 else
279 in_rate = (in_fmt->s_freq / 1000);
280
281 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
282 (mcfg->in_fmt->bit_depth >> 3) *
283 multiplier;
284
285 if (mcfg->out_fmt->s_freq % 1000)
286 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
287 else
288 out_rate = (mcfg->out_fmt->s_freq / 1000);
289
290 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
291 (mcfg->out_fmt->bit_depth >> 3) *
292 multiplier;
293 }
294
295 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
296 struct skl_sst *ctx)
297 {
298 struct skl_module_cfg *m_cfg = w->priv;
299 int link_type, dir;
300 u32 ch, s_freq, s_fmt;
301 struct nhlt_specific_cfg *cfg;
302 struct skl *skl = get_skl_ctx(ctx->dev);
303
304 /* check if we already have blob */
305 if (m_cfg->formats_config.caps_size > 0)
306 return 0;
307
308 dev_dbg(ctx->dev, "Applying default cfg blob\n");
309 switch (m_cfg->dev_type) {
310 case SKL_DEVICE_DMIC:
311 link_type = NHLT_LINK_DMIC;
312 dir = SNDRV_PCM_STREAM_CAPTURE;
313 s_freq = m_cfg->in_fmt[0].s_freq;
314 s_fmt = m_cfg->in_fmt[0].bit_depth;
315 ch = m_cfg->in_fmt[0].channels;
316 break;
317
318 case SKL_DEVICE_I2S:
319 link_type = NHLT_LINK_SSP;
320 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
321 dir = SNDRV_PCM_STREAM_PLAYBACK;
322 s_freq = m_cfg->out_fmt[0].s_freq;
323 s_fmt = m_cfg->out_fmt[0].bit_depth;
324 ch = m_cfg->out_fmt[0].channels;
325 } else {
326 dir = SNDRV_PCM_STREAM_CAPTURE;
327 s_freq = m_cfg->in_fmt[0].s_freq;
328 s_fmt = m_cfg->in_fmt[0].bit_depth;
329 ch = m_cfg->in_fmt[0].channels;
330 }
331 break;
332
333 default:
334 return -EINVAL;
335 }
336
337 /* update the blob based on virtual bus_id and default params */
338 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
339 s_fmt, ch, s_freq, dir);
340 if (cfg) {
341 m_cfg->formats_config.caps_size = cfg->size;
342 m_cfg->formats_config.caps = (u32 *) &cfg->caps;
343 } else {
344 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
345 m_cfg->vbus_id, link_type, dir);
346 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
347 ch, s_freq, s_fmt);
348 return -EIO;
349 }
350
351 return 0;
352 }
353
354 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
355 struct skl_sst *ctx)
356 {
357 struct skl_module_cfg *m_cfg = w->priv;
358 struct skl_pipe_params *params = m_cfg->pipe->p_params;
359 int p_conn_type = m_cfg->pipe->conn_type;
360 bool is_fe;
361
362 if (!m_cfg->params_fixup)
363 return;
364
365 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
366 w->name);
367
368 skl_dump_mconfig(ctx, m_cfg);
369
370 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
371 is_fe = true;
372 else
373 is_fe = false;
374
375 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
376 skl_tplg_update_buffer_size(ctx, m_cfg);
377
378 dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
379 w->name);
380
381 skl_dump_mconfig(ctx, m_cfg);
382 }
383
384 /*
385 * Some modules can have multiple params set from user controls, which need
386 * to be applied after the module is initialized. If the set_params flag is
387 * SKL_PARAM_SET, the module params are sent after module initialization.
388 */
389 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
390 struct skl_sst *ctx)
391 {
392 int i, ret;
393 struct skl_module_cfg *mconfig = w->priv;
394 const struct snd_kcontrol_new *k;
395 struct soc_bytes_ext *sb;
396 struct skl_algo_data *bc;
397 struct skl_specific_cfg *sp_cfg;
398
399 if (mconfig->formats_config.caps_size > 0 &&
400 mconfig->formats_config.set_params == SKL_PARAM_SET) {
401 sp_cfg = &mconfig->formats_config;
402 ret = skl_set_module_params(ctx, sp_cfg->caps,
403 sp_cfg->caps_size,
404 sp_cfg->param_id, mconfig);
405 if (ret < 0)
406 return ret;
407 }
408
409 for (i = 0; i < w->num_kcontrols; i++) {
410 k = &w->kcontrol_news[i];
411 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
412 sb = (void *) k->private_value;
413 bc = (struct skl_algo_data *)sb->dobj.private;
414
415 if (bc->set_params == SKL_PARAM_SET) {
416 ret = skl_set_module_params(ctx,
417 (u32 *)bc->params, bc->size,
418 bc->param_id, mconfig);
419 if (ret < 0)
420 return ret;
421 }
422 }
423 }
424
425 return 0;
426 }
427
428 /*
429 * Some module params come from user controls but are needed at module
430 * init time. Such a param is identified by its set_params flag being
431 * SKL_PARAM_INIT; in that case the param data is copied into the init
432 * config and sent as part of module init instead of afterwards.
433 */
434 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
435 {
436 const struct snd_kcontrol_new *k;
437 struct soc_bytes_ext *sb;
438 struct skl_algo_data *bc;
439 struct skl_module_cfg *mconfig = w->priv;
440 int i;
441
442 for (i = 0; i < w->num_kcontrols; i++) {
443 k = &w->kcontrol_news[i];
444 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
445 sb = (struct soc_bytes_ext *)k->private_value;
446 bc = (struct skl_algo_data *)sb->dobj.private;
447
448 if (bc->set_params != SKL_PARAM_INIT)
449 continue;
450
451 mconfig->formats_config.caps = (u32 *)&bc->params;
452 mconfig->formats_config.caps_size = bc->size;
453
454 break;
455 }
456 }
457
458 return 0;
459 }
460
461 /*
462 * Inside a pipe instance, we can have various modules. These modules need
463 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
464 * done by skl_init_module(), so invoke that for all modules in a pipeline
465 */
466 static int
467 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
468 {
469 struct skl_pipe_module *w_module;
470 struct snd_soc_dapm_widget *w;
471 struct skl_module_cfg *mconfig;
472 struct skl_sst *ctx = skl->skl_sst;
473 int ret = 0;
474
475 list_for_each_entry(w_module, &pipe->w_list, node) {
476 w = w_module->w;
477 mconfig = w->priv;
478
479 /* check if module ids are populated */
480 if (mconfig->id.module_id < 0) {
481 dev_err(skl->skl_sst->dev,
482 "module %pUL id not populated\n",
483 (uuid_le *)mconfig->guid);
484 return -EIO;
485 }
486
487 /* check resource available */
488 if (!skl_is_pipe_mcps_avail(skl, mconfig))
489 return -ENOMEM;
490
491 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
492 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
493 mconfig->id.module_id, mconfig->guid);
494 if (ret < 0)
495 return ret;
496
497 mconfig->m_state = SKL_MODULE_LOADED;
498 }
499
500 /* if the BE blob is null, update it with a default value */
501 skl_tplg_update_be_blob(w, ctx);
502
503 /*
504 * apply fix/conversion to module params based on
505 * FE/BE params
506 */
507 skl_tplg_update_module_params(w, ctx);
508
509 skl_tplg_set_module_init_data(w);
510 ret = skl_init_module(ctx, mconfig);
511 if (ret < 0)
512 return ret;
513
514 skl_tplg_alloc_pipe_mcps(skl, mconfig);
515 ret = skl_tplg_set_module_params(w, ctx);
516 if (ret < 0)
517 return ret;
518 }
519
520 return 0;
521 }
522
523 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
524 struct skl_pipe *pipe)
525 {
526 int ret;
527 struct skl_pipe_module *w_module = NULL;
528 struct skl_module_cfg *mconfig = NULL;
529
530 list_for_each_entry(w_module, &pipe->w_list, node) {
531 mconfig = w_module->w->priv;
532
533 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
534 mconfig->m_state > SKL_MODULE_UNINIT) {
535 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
536 mconfig->id.module_id);
537 if (ret < 0)
538 return -EIO;
539 }
540 }
541
542 /* no modules to unload in this path, so return */
543 return 0;
544 }
545
546 /*
547 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
548 * need to create the pipeline. So we do the following:
549 * - check the resources
550 * - Create the pipeline
551 * - Initialize the modules in pipeline
552 * - finally bind all modules together
553 */
554 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
555 struct skl *skl)
556 {
557 int ret;
558 struct skl_module_cfg *mconfig = w->priv;
559 struct skl_pipe_module *w_module;
560 struct skl_pipe *s_pipe = mconfig->pipe;
561 struct skl_module_cfg *src_module = NULL, *dst_module;
562 struct skl_sst *ctx = skl->skl_sst;
563
564 /* check resource available */
565 if (!skl_is_pipe_mcps_avail(skl, mconfig))
566 return -EBUSY;
567
568 if (!skl_is_pipe_mem_avail(skl, mconfig))
569 return -ENOMEM;
570
571 /*
572 * Create a list of modules for pipe.
573 * This list contains modules from source to sink
574 */
575 ret = skl_create_pipeline(ctx, mconfig->pipe);
576 if (ret < 0)
577 return ret;
578
579 skl_tplg_alloc_pipe_mem(skl, mconfig);
580 skl_tplg_alloc_pipe_mcps(skl, mconfig);
581
582 /* Init all pipe modules from source to sink */
583 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
584 if (ret < 0)
585 return ret;
586
587 /* Bind modules from source to sink */
588 list_for_each_entry(w_module, &s_pipe->w_list, node) {
589 dst_module = w_module->w->priv;
590
591 if (src_module == NULL) {
592 src_module = dst_module;
593 continue;
594 }
595
596 ret = skl_bind_modules(ctx, src_module, dst_module);
597 if (ret < 0)
598 return ret;
599
600 src_module = dst_module;
601 }
602
603 return 0;
604 }
605
606 /*
607 * Some modules require params to be set after the module is bound on
608 * all of its connected pins.
609 *
610 * The module provider initializes set_param flag for such modules and we
611 * send params after binding
612 */
613 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
614 struct skl_module_cfg *mcfg, struct skl_sst *ctx)
615 {
616 int i, ret;
617 struct skl_module_cfg *mconfig = w->priv;
618 const struct snd_kcontrol_new *k;
619 struct soc_bytes_ext *sb;
620 struct skl_algo_data *bc;
621 struct skl_specific_cfg *sp_cfg;
622
623 /*
624 * check all out/in pins are in bind state.
625 * if so set the module param
626 */
627 for (i = 0; i < mcfg->max_out_queue; i++) {
628 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
629 return 0;
630 }
631
632 for (i = 0; i < mcfg->max_in_queue; i++) {
633 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
634 return 0;
635 }
636
637 if (mconfig->formats_config.caps_size > 0 &&
638 mconfig->formats_config.set_params == SKL_PARAM_BIND) {
639 sp_cfg = &mconfig->formats_config;
640 ret = skl_set_module_params(ctx, sp_cfg->caps,
641 sp_cfg->caps_size,
642 sp_cfg->param_id, mconfig);
643 if (ret < 0)
644 return ret;
645 }
646
647 for (i = 0; i < w->num_kcontrols; i++) {
648 k = &w->kcontrol_news[i];
649 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
650 sb = (void *) k->private_value;
651 bc = (struct skl_algo_data *)sb->dobj.private;
652
653 if (bc->set_params == SKL_PARAM_BIND) {
654 ret = skl_set_module_params(ctx,
655 (u32 *)bc->params, bc->max,
656 bc->param_id, mconfig);
657 if (ret < 0)
658 return ret;
659 }
660 }
661 }
662
663 return 0;
664 }
665
666 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
667 struct skl *skl,
668 struct snd_soc_dapm_widget *src_w,
669 struct skl_module_cfg *src_mconfig)
670 {
671 struct snd_soc_dapm_path *p;
672 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
673 struct skl_module_cfg *sink_mconfig;
674 struct skl_sst *ctx = skl->skl_sst;
675 int ret;
676
677 snd_soc_dapm_widget_for_each_sink_path(w, p) {
678 if (!p->connect)
679 continue;
680
681 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
682 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
683
684 next_sink = p->sink;
685
686 if (!is_skl_dsp_widget_type(p->sink))
687 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
688
689 /*
690 * Here we check widgets in the sink pipelines. They can be of
691 * any widget type and we are only interested in the ones used
692 * by SKL, so check that first
693 */
694 if ((p->sink->priv != NULL) &&
695 is_skl_dsp_widget_type(p->sink)) {
696
697 sink = p->sink;
698 sink_mconfig = sink->priv;
699
700 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
701 sink_mconfig->m_state == SKL_MODULE_UNINIT)
702 continue;
703
704 /* Bind source to sink, mixin is always source */
705 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
706 if (ret)
707 return ret;
708
709 /* set module params after bind */
710 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
711 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
712
713 /* Start the sink's pipe first */
714 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
715 if (sink_mconfig->pipe->conn_type !=
716 SKL_PIPE_CONN_TYPE_FE)
717 ret = skl_run_pipe(ctx,
718 sink_mconfig->pipe);
719 if (ret)
720 return ret;
721 }
722 }
723 }
724
725 if (!sink)
726 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
727
728 return 0;
729 }
730
731 /*
732 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
733 * we need to do the following:
734 * - Bind to sink pipeline
735 * Since the sink pipes can be running and we don't get mixer event on
736 * connect for already running mixer, we need to find the sink pipes
737 * here and bind to them. This way dynamic connect works.
738 * - Start sink pipeline, if not running
739 * - Then run current pipe
740 */
741 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
742 struct skl *skl)
743 {
744 struct skl_module_cfg *src_mconfig;
745 struct skl_sst *ctx = skl->skl_sst;
746 int ret = 0;
747
748 src_mconfig = w->priv;
749
750 /*
751 * find which sink it is connected to, bind with the sink,
752 * if sink is not started, start sink pipe first, then start
753 * this pipe
754 */
755 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
756 if (ret)
757 return ret;
758
759 /* Start source pipe last after starting all sinks */
760 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
761 return skl_run_pipe(ctx, src_mconfig->pipe);
762
763 return 0;
764 }
765
766 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
767 struct snd_soc_dapm_widget *w, struct skl *skl)
768 {
769 struct snd_soc_dapm_path *p;
770 struct snd_soc_dapm_widget *src_w = NULL;
771 struct skl_sst *ctx = skl->skl_sst;
772
773 snd_soc_dapm_widget_for_each_source_path(w, p) {
774 src_w = p->source;
775 if (!p->connect)
776 continue;
777
778 dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
779 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
780
781 /*
782 * Here we check widgets in the source pipelines. They can be of
783 * any widget type and we are only interested in the ones used
784 * by SKL, so check that first
785 */
786 if ((p->source->priv != NULL) &&
787 is_skl_dsp_widget_type(p->source)) {
788 return p->source;
789 }
790 }
791
792 if (src_w != NULL)
793 return skl_get_src_dsp_widget(src_w, skl);
794
795 return NULL;
796 }
797
798 /*
799 * in the Post-PMU event of mixer we need to do the following:
800 * - Check if this pipe is running
801 * - if not, then
802 * - bind this pipeline to its source pipeline
803 * if source pipe is already running, this means it is a dynamic
804 * connection and we need to bind only to that pipe
805 * - start this pipeline
806 */
807 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
808 struct skl *skl)
809 {
810 int ret = 0;
811 struct snd_soc_dapm_widget *source, *sink;
812 struct skl_module_cfg *src_mconfig, *sink_mconfig;
813 struct skl_sst *ctx = skl->skl_sst;
814 int src_pipe_started = 0;
815
816 sink = w;
817 sink_mconfig = sink->priv;
818
819 /*
820 * If the source pipe is already started, that means the source was
821 * already driving another sink before this sink got connected. Since
822 * the source is started, bind this sink to the source and start this pipe.
823 */
824 source = skl_get_src_dsp_widget(w, skl);
825 if (source != NULL) {
826 src_mconfig = source->priv;
827 sink_mconfig = sink->priv;
828 src_pipe_started = 1;
829
830 /*
831 * if the source pipe is not started, there is no need to bind or
832 * start the pipe here
833 */
834 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
835 src_pipe_started = 0;
836 }
837
838 if (src_pipe_started) {
839 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
840 if (ret)
841 return ret;
842
843 /* set module params after bind */
844 skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
845 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
846
847 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
848 ret = skl_run_pipe(ctx, sink_mconfig->pipe);
849 }
850
851 return ret;
852 }
853
854 /*
855 * in the Pre-PMD event of mixer we need to do the following:
856 * - Stop the pipe
857 * - find the source connections and remove that from dapm_path_list
858 * - unbind with source pipelines if still connected
859 */
860 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
861 struct skl *skl)
862 {
863 struct skl_module_cfg *src_mconfig, *sink_mconfig;
864 int ret = 0, i;
865 struct skl_sst *ctx = skl->skl_sst;
866
867 sink_mconfig = w->priv;
868
869 /* Stop the pipe */
870 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
871 if (ret)
872 return ret;
873
874 for (i = 0; i < sink_mconfig->max_in_queue; i++) {
875 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
876 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
877 if (!src_mconfig)
878 continue;
879 /*
880 * If the pin is still in the bound state, the pmd for the source
881 * pipe has not occurred yet and the source is connected to
882 * some other sink; so it is the sink's responsibility
883 * to unbind itself from the source.
884 */
885 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
886 if (ret < 0)
887 return ret;
888
889 ret = skl_unbind_modules(ctx,
890 src_mconfig, sink_mconfig);
891 }
892 }
893
894 return ret;
895 }
896
897 /*
898 * in the Post-PMD event of mixer we need to do the following:
899 * - Free the mcps used
900 * - Free the mem used
901 * - Unbind the modules within the pipeline
902 * - Delete the pipeline (modules are not required to be explicitly
903 * deleted, pipeline delete is enough here)
904 */
905 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
906 struct skl *skl)
907 {
908 struct skl_module_cfg *mconfig = w->priv;
909 struct skl_pipe_module *w_module;
910 struct skl_module_cfg *src_module = NULL, *dst_module;
911 struct skl_sst *ctx = skl->skl_sst;
912 struct skl_pipe *s_pipe = mconfig->pipe;
913 int ret = 0;
914
915 if (s_pipe->state == SKL_PIPE_INVALID)
916 return -EINVAL;
917
918 skl_tplg_free_pipe_mcps(skl, mconfig);
919 skl_tplg_free_pipe_mem(skl, mconfig);
920
921 list_for_each_entry(w_module, &s_pipe->w_list, node) {
922 dst_module = w_module->w->priv;
923
924 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
925 skl_tplg_free_pipe_mcps(skl, dst_module);
926 if (src_module == NULL) {
927 src_module = dst_module;
928 continue;
929 }
930
931 skl_unbind_modules(ctx, src_module, dst_module);
932 src_module = dst_module;
933 }
934
935 ret = skl_delete_pipe(ctx, mconfig->pipe);
936
937 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
938 }
939
940 /*
941 * in the Post-PMD event of PGA we need to do the following:
942 * - Free the mcps used
943 * - Stop the pipeline
944 * - If a sink pipe is connected, unbind from the sink pipelines
945 */
946 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
947 struct skl *skl)
948 {
949 struct skl_module_cfg *src_mconfig, *sink_mconfig;
950 int ret = 0, i;
951 struct skl_sst *ctx = skl->skl_sst;
952
953 src_mconfig = w->priv;
954
955 /* Stop the pipe since this is a mixin module */
956 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
957 if (ret)
958 return ret;
959
960 for (i = 0; i < src_mconfig->max_out_queue; i++) {
961 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
962 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
963 if (!sink_mconfig)
964 continue;
965 /*
966 * This is a connector; if the pin is still bound, the
967 * unbind between source and sink has not happened yet
968 */
969 ret = skl_unbind_modules(ctx, src_mconfig,
970 sink_mconfig);
971 }
972 }
973
974 return ret;
975 }
976
977 /*
978 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
979 * a mixer is not required then it is treated as a static mixer, aka vmixer,
980 * with a hard path to the source module.
981 * So we don't need to check whether the source is started or not, as the
982 * hard path makes them dependent on each other
983 */
984 static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
985 struct snd_kcontrol *k, int event)
986 {
987 struct snd_soc_dapm_context *dapm = w->dapm;
988 struct skl *skl = get_skl_ctx(dapm->dev);
989
990 switch (event) {
991 case SND_SOC_DAPM_PRE_PMU:
992 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
993
994 case SND_SOC_DAPM_POST_PMU:
995 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
996
997 case SND_SOC_DAPM_PRE_PMD:
998 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
999
1000 case SND_SOC_DAPM_POST_PMD:
1001 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1002 }
1003
1004 return 0;
1005 }
1006
1007 /*
1008 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1009 * second one is required that is created as another pipe entity.
1010 * The mixer is responsible for pipe management and represent a pipeline
1011 * instance
1012 */
1013 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1014 struct snd_kcontrol *k, int event)
1015 {
1016 struct snd_soc_dapm_context *dapm = w->dapm;
1017 struct skl *skl = get_skl_ctx(dapm->dev);
1018
1019 switch (event) {
1020 case SND_SOC_DAPM_PRE_PMU:
1021 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1022
1023 case SND_SOC_DAPM_POST_PMU:
1024 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1025
1026 case SND_SOC_DAPM_PRE_PMD:
1027 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1028
1029 case SND_SOC_DAPM_POST_PMD:
1030 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1031 }
1032
1033 return 0;
1034 }
1035
1036 /*
1037 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
1038 * But we are only interested in the last PGA (leaf PGA) in a pipeline, to
1039 * disconnect from the sink while it is running (two FEs to one BE or one
1040 * FE to two BEs scenarios)
1041 */
1042 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1043 struct snd_kcontrol *k, int event)
1044
1045 {
1046 struct snd_soc_dapm_context *dapm = w->dapm;
1047 struct skl *skl = get_skl_ctx(dapm->dev);
1048
1049 switch (event) {
1050 case SND_SOC_DAPM_PRE_PMU:
1051 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1052
1053 case SND_SOC_DAPM_POST_PMD:
1054 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1055 }
1056
1057 return 0;
1058 }
1059
1060 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1061 unsigned int __user *data, unsigned int size)
1062 {
1063 struct soc_bytes_ext *sb =
1064 (struct soc_bytes_ext *)kcontrol->private_value;
1065 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1066 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1067 struct skl_module_cfg *mconfig = w->priv;
1068 struct skl *skl = get_skl_ctx(w->dapm->dev);
1069
1070 if (w->power)
1071 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1072 bc->size, bc->param_id, mconfig);
1073
1074 /* decrement size for TLV header */
1075 size -= 2 * sizeof(u32);
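/*
 * The user-space TLV blob, as this handler assumes, is laid out as
 * data[0] = parameter id, data[1] = payload size, data[2..] = payload;
 * hence the two-u32 header subtracted above and the three
 * copy_to_user() calls below.
 */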
1076
1077 /* check size as we don't want to send kernel data */
1078 if (size > bc->max)
1079 size = bc->max;
1080
1081 if (bc->params) {
1082 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1083 return -EFAULT;
1084 if (copy_to_user(data + 1, &size, sizeof(u32)))
1085 return -EFAULT;
1086 if (copy_to_user(data + 2, bc->params, size))
1087 return -EFAULT;
1088 }
1089
1090 return 0;
1091 }
1092
1093 #define SKL_PARAM_VENDOR_ID 0xff
1094
1095 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1096 const unsigned int __user *data, unsigned int size)
1097 {
1098 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1099 struct skl_module_cfg *mconfig = w->priv;
1100 struct soc_bytes_ext *sb =
1101 (struct soc_bytes_ext *)kcontrol->private_value;
1102 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1103 struct skl *skl = get_skl_ctx(w->dapm->dev);
1104
1105 if (ac->params) {
1106 if (size > ac->max)
1107 return -EINVAL;
1108
1109 ac->size = size;
1110 /*
1111 * if the param_id is of type Vendor, firmware expects the actual
1112 * parameter id and size from the control.
1113 */
1114 if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1115 if (copy_from_user(ac->params, data, size))
1116 return -EFAULT;
1117 } else {
1118 if (copy_from_user(ac->params,
1119 data + 2, size))
1120 return -EFAULT;
1121 }
1122
1123 if (w->power)
1124 return skl_set_module_params(skl->skl_sst,
1125 (u32 *)ac->params, ac->size,
1126 ac->param_id, mconfig);
1127 }
1128
1129 return 0;
1130 }
1131
1132 /*
1133 * Fill the dma id for host and link. In case of a passthrough
1134 * pipeline, both host and link are in the same
1135 * pipeline, so we need to copy the link or host dma id based on dev_type
1136 */
1137 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1138 struct skl_pipe_params *params)
1139 {
1140 struct skl_pipe *pipe = mcfg->pipe;
1141
1142 if (pipe->passthru) {
1143 switch (mcfg->dev_type) {
1144 case SKL_DEVICE_HDALINK:
1145 pipe->p_params->link_dma_id = params->link_dma_id;
1146 break;
1147
1148 case SKL_DEVICE_HDAHOST:
1149 pipe->p_params->host_dma_id = params->host_dma_id;
1150 break;
1151
1152 default:
1153 break;
1154 }
1155 pipe->p_params->s_fmt = params->s_fmt;
1156 pipe->p_params->ch = params->ch;
1157 pipe->p_params->s_freq = params->s_freq;
1158 pipe->p_params->stream = params->stream;
1159
1160 } else {
1161 memcpy(pipe->p_params, params, sizeof(*params));
1162 }
1163 }
1164
1165 /*
1166 * The FE params are passed by hw_params of the DAI.
1167 * On hw_params, the params are stored in the Gateway module of the FE and
1168 * we need to convert them into the DSP module configuration format; that
1169 * conversion is done here
1170 */
1171 int skl_tplg_update_pipe_params(struct device *dev,
1172 struct skl_module_cfg *mconfig,
1173 struct skl_pipe_params *params)
1174 {
1175 struct skl_module_fmt *format = NULL;
1176
1177 skl_tplg_fill_dma_id(mconfig, params);
1178
1179 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1180 format = &mconfig->in_fmt[0];
1181 else
1182 format = &mconfig->out_fmt[0];
1183
1184 /* set the hw_params */
1185 format->s_freq = params->s_freq;
1186 format->channels = params->ch;
1187 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1188
1189 /*
1190 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1191 * container so update bit depth accordingly
1192 */
1193 switch (format->valid_bit_depth) {
1194 case SKL_DEPTH_16BIT:
1195 format->bit_depth = format->valid_bit_depth;
1196 break;
1197
1198 case SKL_DEPTH_24BIT:
1199 case SKL_DEPTH_32BIT:
1200 format->bit_depth = SKL_DEPTH_32BIT;
1201 break;
1202
1203 default:
1204 dev_err(dev, "Invalid bit depth %x for pipe\n",
1205 format->valid_bit_depth);
1206 return -EINVAL;
1207 }
1208
1209 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1210 mconfig->ibs = (format->s_freq / 1000) *
1211 (format->channels) *
1212 (format->bit_depth >> 3);
1213 } else {
1214 mconfig->obs = (format->s_freq / 1000) *
1215 (format->channels) *
1216 (format->bit_depth >> 3);
1217 }
1218
1219 return 0;
1220 }
1221
1222 /*
1223 * Query the module config for the FE DAI
1224 * This is used to find the hw_params set for that DAI and apply to FE
1225 * pipeline
1226 */
1227 struct skl_module_cfg *
1228 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1229 {
1230 struct snd_soc_dapm_widget *w;
1231 struct snd_soc_dapm_path *p = NULL;
1232
1233 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1234 w = dai->playback_widget;
1235 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1236 if (p->connect && p->sink->power &&
1237 !is_skl_dsp_widget_type(p->sink))
1238 continue;
1239
1240 if (p->sink->priv) {
1241 dev_dbg(dai->dev, "set params for %s\n",
1242 p->sink->name);
1243 return p->sink->priv;
1244 }
1245 }
1246 } else {
1247 w = dai->capture_widget;
1248 snd_soc_dapm_widget_for_each_source_path(w, p) {
1249 if (p->connect && p->source->power &&
1250 !is_skl_dsp_widget_type(p->source))
1251 continue;
1252
1253 if (p->source->priv) {
1254 dev_dbg(dai->dev, "set params for %s\n",
1255 p->source->name);
1256 return p->source->priv;
1257 }
1258 }
1259 }
1260
1261 return NULL;
1262 }
1263
1264 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1265 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1266 {
1267 struct snd_soc_dapm_path *p;
1268 struct skl_module_cfg *mconfig = NULL;
1269
1270 snd_soc_dapm_widget_for_each_source_path(w, p) {
1271 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1272 if (p->connect &&
1273 (p->sink->id == snd_soc_dapm_aif_out) &&
1274 p->source->priv) {
1275 mconfig = p->source->priv;
1276 return mconfig;
1277 }
1278 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1279 if (mconfig)
1280 return mconfig;
1281 }
1282 }
1283 return mconfig;
1284 }
1285
1286 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1287 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1288 {
1289 struct snd_soc_dapm_path *p;
1290 struct skl_module_cfg *mconfig = NULL;
1291
1292 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1293 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1294 if (p->connect &&
1295 (p->source->id == snd_soc_dapm_aif_in) &&
1296 p->sink->priv) {
1297 mconfig = p->sink->priv;
1298 return mconfig;
1299 }
1300 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1301 if (mconfig)
1302 return mconfig;
1303 }
1304 }
1305 return mconfig;
1306 }
1307
1308 struct skl_module_cfg *
1309 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1310 {
1311 struct snd_soc_dapm_widget *w;
1312 struct skl_module_cfg *mconfig;
1313
1314 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1315 w = dai->playback_widget;
1316 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1317 } else {
1318 w = dai->capture_widget;
1319 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1320 }
1321 return mconfig;
1322 }
1323
1324 static u8 skl_tplg_be_link_type(int dev_type)
1325 {
1326 int ret;
1327
1328 switch (dev_type) {
1329 case SKL_DEVICE_BT:
1330 ret = NHLT_LINK_SSP;
1331 break;
1332
1333 case SKL_DEVICE_DMIC:
1334 ret = NHLT_LINK_DMIC;
1335 break;
1336
1337 case SKL_DEVICE_I2S:
1338 ret = NHLT_LINK_SSP;
1339 break;
1340
1341 case SKL_DEVICE_HDALINK:
1342 ret = NHLT_LINK_HDA;
1343 break;
1344
1345 default:
1346 ret = NHLT_LINK_INVALID;
1347 break;
1348 }
1349
1350 return ret;
1351 }
1352
1353 /*
1354 * Fill the BE gateway parameters
1355 * The BE gateway expects a blob of parameters which are kept in the ACPI
1356 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1357 * The port can have multiple settings so pick based on the PCM
1358 * parameters
1359 */
1360 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1361 struct skl_module_cfg *mconfig,
1362 struct skl_pipe_params *params)
1363 {
1364 struct nhlt_specific_cfg *cfg;
1365 struct skl *skl = get_skl_ctx(dai->dev);
1366 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1367
1368 skl_tplg_fill_dma_id(mconfig, params);
1369
1370 if (link_type == NHLT_LINK_HDA)
1371 return 0;
1372
1373 /* update the blob based on virtual bus_id*/
1374 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1375 params->s_fmt, params->ch,
1376 params->s_freq, params->stream);
1377 if (cfg) {
1378 mconfig->formats_config.caps_size = cfg->size;
1379 mconfig->formats_config.caps = (u32 *) &cfg->caps;
1380 } else {
1381 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1382 mconfig->vbus_id, link_type,
1383 params->stream);
1384 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1385 params->ch, params->s_freq, params->s_fmt);
1386 return -EINVAL;
1387 }
1388
1389 return 0;
1390 }
1391
1392 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1393 struct snd_soc_dapm_widget *w,
1394 struct skl_pipe_params *params)
1395 {
1396 struct snd_soc_dapm_path *p;
1397 int ret = -EIO;
1398
1399 snd_soc_dapm_widget_for_each_source_path(w, p) {
1400 if (p->connect && is_skl_dsp_widget_type(p->source) &&
1401 p->source->priv) {
1402
1403 ret = skl_tplg_be_fill_pipe_params(dai,
1404 p->source->priv, params);
1405 if (ret < 0)
1406 return ret;
1407 } else {
1408 ret = skl_tplg_be_set_src_pipe_params(dai,
1409 p->source, params);
1410 if (ret < 0)
1411 return ret;
1412 }
1413 }
1414
1415 return ret;
1416 }
1417
1418 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1419 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1420 {
1421 struct snd_soc_dapm_path *p = NULL;
1422 int ret = -EIO;
1423
1424 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1425 if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1426 p->sink->priv) {
1427
1428 ret = skl_tplg_be_fill_pipe_params(dai,
1429 p->sink->priv, params);
1430 if (ret < 0)
1431 return ret;
1432 } else {
1433 ret = skl_tplg_be_set_sink_pipe_params(
1434 dai, p->sink, params);
1435 if (ret < 0)
1436 return ret;
1437 }
1438 }
1439
1440 return ret;
1441 }
1442
1443 /*
1444 * BE hw_params can be source parameters (capture) or sink parameters
1445 * (playback). Based on the direction we need to either walk the source
1446 * list or the sink list and set the pipeline parameters
1447 */
1448 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1449 struct skl_pipe_params *params)
1450 {
1451 struct snd_soc_dapm_widget *w;
1452
1453 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1454 w = dai->playback_widget;
1455
1456 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1457
1458 } else {
1459 w = dai->capture_widget;
1460
1461 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1462 }
1463
1464 return 0;
1465 }
1466
1467 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1468 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1469 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
1470 {SKL_PGA_EVENT, skl_tplg_pga_event},
1471 };
1472
1473 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1474 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1475 skl_tplg_tlv_control_set},
1476 };
1477
1478 static int skl_tplg_fill_pipe_tkn(struct device *dev,
1479 struct skl_pipe *pipe, u32 tkn,
1480 u32 tkn_val)
1481 {
1482
1483 switch (tkn) {
1484 case SKL_TKN_U32_PIPE_CONN_TYPE:
1485 pipe->conn_type = tkn_val;
1486 break;
1487
1488 case SKL_TKN_U32_PIPE_PRIORITY:
1489 pipe->pipe_priority = tkn_val;
1490 break;
1491
1492 case SKL_TKN_U32_PIPE_MEM_PGS:
1493 pipe->memory_pages = tkn_val;
1494 break;
1495
1496 default:
1497 dev_err(dev, "Token not handled %d\n", tkn);
1498 return -EINVAL;
1499 }
1500
1501 return 0;
1502 }
1503
1504 /*
1505 * Add pipeline by parsing the relevant tokens
1506 * Return an existing pipe if the pipe already exists.
1507 */
1508 static int skl_tplg_add_pipe(struct device *dev,
1509 struct skl_module_cfg *mconfig, struct skl *skl,
1510 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
1511 {
1512 struct skl_pipeline *ppl;
1513 struct skl_pipe *pipe;
1514 struct skl_pipe_params *params;
1515
1516 list_for_each_entry(ppl, &skl->ppl_list, node) {
1517 if (ppl->pipe->ppl_id == tkn_elem->value) {
1518 mconfig->pipe = ppl->pipe;
1519 return EEXIST;
1520 }
1521 }
1522
1523 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1524 if (!ppl)
1525 return -ENOMEM;
1526
1527 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1528 if (!pipe)
1529 return -ENOMEM;
1530
1531 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1532 if (!params)
1533 return -ENOMEM;
1534
1535 pipe->p_params = params;
1536 pipe->ppl_id = tkn_elem->value;
1537 INIT_LIST_HEAD(&pipe->w_list);
1538
1539 ppl->pipe = pipe;
1540 list_add(&ppl->node, &skl->ppl_list);
1541
1542 mconfig->pipe = pipe;
1543 mconfig->pipe->state = SKL_PIPE_INVALID;
1544
1545 return 0;
1546 }
1547
1548 static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1549 struct skl_module_pin *m_pin,
1550 int pin_index, u32 value)
1551 {
1552 switch (tkn) {
1553 case SKL_TKN_U32_PIN_MOD_ID:
1554 m_pin[pin_index].id.module_id = value;
1555 break;
1556
1557 case SKL_TKN_U32_PIN_INST_ID:
1558 m_pin[pin_index].id.instance_id = value;
1559 break;
1560
1561 default:
1562 dev_err(dev, "%d Not a pin token\n", value);
1563 return -EINVAL;
1564 }
1565
1566 return 0;
1567 }
1568
1569 /*
1570 * Parse for pin config specific tokens to fill up the
1571 * module private data
1572 */
1573 static int skl_tplg_fill_pins_info(struct device *dev,
1574 struct skl_module_cfg *mconfig,
1575 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1576 int dir, int pin_count)
1577 {
1578 int ret;
1579 struct skl_module_pin *m_pin;
1580
1581 switch (dir) {
1582 case SKL_DIR_IN:
1583 m_pin = mconfig->m_in_pin;
1584 break;
1585
1586 case SKL_DIR_OUT:
1587 m_pin = mconfig->m_out_pin;
1588 break;
1589
1590 default:
1591 dev_err(dev, "Invalid direction value");
1592 return -EINVAL;
1593 }
1594
1595 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1596 m_pin, pin_count, tkn_elem->value);
1597
1598 if (ret < 0)
1599 return ret;
1600
1601 m_pin[pin_count].in_use = false;
1602 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1603
1604 return 0;
1605 }
1606
1607 /*
1608 * Fill up input/output module config format based
1609 * on the direction
1610 */
1611 static int skl_tplg_fill_fmt(struct device *dev,
1612 struct skl_module_cfg *mconfig, u32 tkn,
1613 u32 value, u32 dir, u32 pin_count)
1614 {
1615 struct skl_module_fmt *dst_fmt;
1616
1617 switch (dir) {
1618 case SKL_DIR_IN:
1619 dst_fmt = mconfig->in_fmt;
1620 dst_fmt += pin_count;
1621 break;
1622
1623 case SKL_DIR_OUT:
1624 dst_fmt = mconfig->out_fmt;
1625 dst_fmt += pin_count;
1626 break;
1627
1628 default:
1629 dev_err(dev, "Invalid direction value");
1630 return -EINVAL;
1631 }
1632
1633 switch (tkn) {
1634 case SKL_TKN_U32_FMT_CH:
1635 dst_fmt->channels = value;
1636 break;
1637
1638 case SKL_TKN_U32_FMT_FREQ:
1639 dst_fmt->s_freq = value;
1640 break;
1641
1642 case SKL_TKN_U32_FMT_BIT_DEPTH:
1643 dst_fmt->bit_depth = value;
1644 break;
1645
1646 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1647 dst_fmt->valid_bit_depth = value;
1648 break;
1649
1650 case SKL_TKN_U32_FMT_CH_CONFIG:
1651 dst_fmt->ch_cfg = value;
1652 break;
1653
1654 case SKL_TKN_U32_FMT_INTERLEAVE:
1655 dst_fmt->interleaving_style = value;
1656 break;
1657
1658 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1659 dst_fmt->sample_type = value;
1660 break;
1661
1662 case SKL_TKN_U32_FMT_CH_MAP:
1663 dst_fmt->ch_map = value;
1664 break;
1665
1666 default:
1667 dev_err(dev, "Invalid token %d", tkn);
1668 return -EINVAL;
1669 }
1670
1671 return 0;
1672 }
1673
1674 static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1675 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1676 {
1677 if (uuid_tkn->token == SKL_TKN_UUID)
1678 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1679 else {
1680 dev_err(dev, "Not a UUID token tkn %d", uuid_tkn->token);
1681 return -EINVAL;
1682 }
1683
1684 return 0;
1685 }
1686
1687 static void skl_tplg_fill_pin_dynamic_val(
1688 struct skl_module_pin *mpin, u32 pin_count, u32 value)
1689 {
1690 int i;
1691
1692 for (i = 0; i < pin_count; i++)
1693 mpin[i].is_dynamic = value;
1694 }
1695
1696 /*
1697 * Parse tokens to fill up the module private data
1698 */
1699 static int skl_tplg_get_token(struct device *dev,
1700 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1701 struct skl *skl, struct skl_module_cfg *mconfig)
1702 {
1703 int tkn_count = 0;
1704 int ret;
1705 static int is_pipe_exists;
1706 static int pin_index, dir;
1707
1708 if (tkn_elem->token > SKL_TKN_MAX)
1709 return -EINVAL;
1710
1711 switch (tkn_elem->token) {
1712 case SKL_TKN_U8_IN_QUEUE_COUNT:
1713 mconfig->max_in_queue = tkn_elem->value;
1714 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1715 sizeof(*mconfig->m_in_pin),
1716 GFP_KERNEL);
1717 if (!mconfig->m_in_pin)
1718 return -ENOMEM;
1719
1720 break;
1721
1722 case SKL_TKN_U8_OUT_QUEUE_COUNT:
1723 mconfig->max_out_queue = tkn_elem->value;
1724 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1725 sizeof(*mconfig->m_out_pin),
1726 GFP_KERNEL);
1727
1728 if (!mconfig->m_out_pin)
1729 return -ENOMEM;
1730
1731 break;
1732
1733 case SKL_TKN_U8_DYN_IN_PIN:
1734 if (!mconfig->m_in_pin)
1735 return -ENOMEM;
1736
1737 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1738 mconfig->max_in_queue, tkn_elem->value);
1739
1740 break;
1741
1742 case SKL_TKN_U8_DYN_OUT_PIN:
1743 if (!mconfig->m_out_pin)
1744 return -ENOMEM;
1745
1746 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1747 mconfig->max_out_queue, tkn_elem->value);
1748
1749 break;
1750
1751 case SKL_TKN_U8_TIME_SLOT:
1752 mconfig->time_slot = tkn_elem->value;
1753 break;
1754
1755 case SKL_TKN_U8_CORE_ID:
1756 mconfig->core_id = tkn_elem->value;
break;
1757
1758 case SKL_TKN_U8_MOD_TYPE:
1759 mconfig->m_type = tkn_elem->value;
1760 break;
1761
1762 case SKL_TKN_U8_DEV_TYPE:
1763 mconfig->dev_type = tkn_elem->value;
1764 break;
1765
1766 case SKL_TKN_U8_HW_CONN_TYPE:
1767 mconfig->hw_conn_type = tkn_elem->value;
1768 break;
1769
1770 case SKL_TKN_U16_MOD_INST_ID:
1771 mconfig->id.instance_id =
1772 tkn_elem->value;
1773 break;
1774
1775 case SKL_TKN_U32_MEM_PAGES:
1776 mconfig->mem_pages = tkn_elem->value;
1777 break;
1778
1779 case SKL_TKN_U32_MAX_MCPS:
1780 mconfig->mcps = tkn_elem->value;
1781 break;
1782
1783 case SKL_TKN_U32_OBS:
1784 mconfig->obs = tkn_elem->value;
1785 break;
1786
1787 case SKL_TKN_U32_IBS:
1788 mconfig->ibs = tkn_elem->value;
1789 break;
1790
1791 case SKL_TKN_U32_VBUS_ID:
1792 mconfig->vbus_id = tkn_elem->value;
1793 break;
1794
1795 case SKL_TKN_U32_PARAMS_FIXUP:
1796 mconfig->params_fixup = tkn_elem->value;
1797 break;
1798
1799 case SKL_TKN_U32_CONVERTER:
1800 mconfig->converter = tkn_elem->value;
1801 break;
1802
1803 case SKL_TKN_U32_PIPE_ID:
1804 ret = skl_tplg_add_pipe(dev,
1805 mconfig, skl, tkn_elem);
1806
1807 if (ret < 0)
1808 return ret;
1809
1810 if (ret == EEXIST)
1811 is_pipe_exists = 1;
1812
1813 break;
1814
1815 case SKL_TKN_U32_PIPE_CONN_TYPE:
1816 case SKL_TKN_U32_PIPE_PRIORITY:
1817 case SKL_TKN_U32_PIPE_MEM_PGS:
1818 if (is_pipe_exists) {
1819 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
1820 tkn_elem->token, tkn_elem->value);
1821 if (ret < 0)
1822 return ret;
1823 }
1824
1825 break;
1826
1827 /*
1828 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
1829 * direction and the pin count. The first four bits represent
1830 * direction and next four the pin count.
1831 */
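/*
 * For example, with this layout a token value of 0x21 would decode
 * below to dir = 1 and pin_index = 2.
 */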
1832 case SKL_TKN_U32_DIR_PIN_COUNT:
1833 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
1834 pin_index = (tkn_elem->value &
1835 SKL_PIN_COUNT_MASK) >> 4;
1836
1837 break;
1838
1839 case SKL_TKN_U32_FMT_CH:
1840 case SKL_TKN_U32_FMT_FREQ:
1841 case SKL_TKN_U32_FMT_BIT_DEPTH:
1842 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1843 case SKL_TKN_U32_FMT_CH_CONFIG:
1844 case SKL_TKN_U32_FMT_INTERLEAVE:
1845 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1846 case SKL_TKN_U32_FMT_CH_MAP:
1847 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
1848 tkn_elem->value, dir, pin_index);
1849
1850 if (ret < 0)
1851 return ret;
1852
1853 break;
1854
1855 case SKL_TKN_U32_PIN_MOD_ID:
1856 case SKL_TKN_U32_PIN_INST_ID:
1857 ret = skl_tplg_fill_pins_info(dev,
1858 mconfig, tkn_elem, dir,
1859 pin_index);
1860 if (ret < 0)
1861 return ret;
1862
1863 break;
1864
1865 case SKL_TKN_U32_CAPS_SIZE:
1866 mconfig->formats_config.caps_size =
1867 tkn_elem->value;
1868
1869 break;
1870
1871 case SKL_TKN_U32_PROC_DOMAIN:
1872 mconfig->domain =
1873 tkn_elem->value;
1874
1875 break;
1876
1877 case SKL_TKN_U8_IN_PIN_TYPE:
1878 case SKL_TKN_U8_OUT_PIN_TYPE:
1879 case SKL_TKN_U8_CONN_TYPE:
1880 break;
1881
1882 default:
1883 dev_err(dev, "Token %d not handled\n",
1884 tkn_elem->token);
1885 return -EINVAL;
1886 }
1887
1888 tkn_count++;
1889
1890 return tkn_count;
1891 }
1892
1893 /*
1894 * Parse the vendor array for specific tokens to construct
1895 * module private data
1896 */
1897 static int skl_tplg_get_tokens(struct device *dev,
1898 char *pvt_data, struct skl *skl,
1899 struct skl_module_cfg *mconfig, int block_size)
1900 {
1901 struct snd_soc_tplg_vendor_array *array;
1902 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1903 int tkn_count = 0, ret;
1904 int off = 0, tuple_size = 0;
1905
1906 if (block_size <= 0)
1907 return -EINVAL;
1908
1909 while (tuple_size < block_size) {
1910 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
1911
1912 off += array->size;
1913
1914 switch (array->type) {
1915 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
1916 dev_warn(dev, "no string tokens expected for skl tplg");
1917 continue;
1918
1919 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
1920 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
1921 if (ret < 0)
1922 return ret;
1923
1924 tuple_size += sizeof(*array->uuid);
1925
1926 continue;
1927
1928 default:
1929 tkn_elem = array->value;
1930 tkn_count = 0;
1931 break;
1932 }
1933
1934 while (tkn_count <= (array->num_elems - 1)) {
1935 ret = skl_tplg_get_token(dev, tkn_elem,
1936 skl, mconfig);
1937
1938 if (ret < 0)
1939 return ret;
1940
1941 tkn_count = tkn_count + ret;
1942 tkn_elem++;
1943 }
1944
1945 tuple_size += tkn_count * sizeof(*tkn_elem);
1946 }
1947
1948 return 0;
1949 }
1950
1951 /*
1952 * Every data block is preceded by a descriptor to read the number
1953 * of data blocks, the type of the block and its size
1954 */
1955 static int skl_tplg_get_desc_blocks(struct device *dev,
1956 struct snd_soc_tplg_vendor_array *array)
1957 {
1958 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1959
1960 tkn_elem = array->value;
1961
1962 switch (tkn_elem->token) {
1963 case SKL_TKN_U8_NUM_BLOCKS:
1964 case SKL_TKN_U8_BLOCK_TYPE:
1965 case SKL_TKN_U16_BLOCK_SIZE:
1966 return tkn_elem->value;
1967
1968 default:
1969 dev_err(dev, "Invalid descriptor token %d", tkn_elem->token);
1970 break;
1971 }
1972
1973 return -EINVAL;
1974 }
1975
1976 /*
1977 * Parse the private data for the token and corresponding value.
1978 * The private data can have multiple data blocks. So, a data block
1979 * is preceded by a descriptor for the number of blocks and a descriptor
1980 * for the type and size of the succeeding data block.
1981 */
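/*
 * Roughly, the layout this parser expects is:
 *   [NUM_BLOCKS descriptor]
 *   [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block data] ...
 * where tuple blocks are handed to skl_tplg_get_tokens() and binary
 * blocks are copied into formats_config.caps (a sketch inferred from
 * the code below).
 */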
1982 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
1983 struct skl *skl, struct device *dev,
1984 struct skl_module_cfg *mconfig)
1985 {
1986 struct snd_soc_tplg_vendor_array *array;
1987 int num_blocks, block_size = 0, block_type, off = 0;
1988 char *data;
1989 int ret;
1990
1991 /* Read the NUM_DATA_BLOCKS descriptor */
1992 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
1993 ret = skl_tplg_get_desc_blocks(dev, array);
1994 if (ret < 0)
1995 return ret;
1996 num_blocks = ret;
1997
1998 off += array->size;
1999 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
2000
2001 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2002 while (num_blocks > 0) {
2003 ret = skl_tplg_get_desc_blocks(dev, array);
2004
2005 if (ret < 0)
2006 return ret;
2007 block_type = ret;
2008 off += array->size;
2009
2010 array = (struct snd_soc_tplg_vendor_array *)
2011 (tplg_w->priv.data + off);
2012
2013 ret = skl_tplg_get_desc_blocks(dev, array);
2014
2015 if (ret < 0)
2016 return ret;
2017 block_size = ret;
2018 off += array->size;
2019
2020 array = (struct snd_soc_tplg_vendor_array *)
2021 (tplg_w->priv.data + off);
2022
2023 data = (tplg_w->priv.data + off);
2024
2025 if (block_type == SKL_TYPE_TUPLE) {
2026 ret = skl_tplg_get_tokens(dev, data,
2027 skl, mconfig, block_size);
2028
2029 if (ret < 0)
2030 return ret;
2031
2032 --num_blocks;
2033 } else {
2034 if (mconfig->formats_config.caps_size > 0)
2035 memcpy(mconfig->formats_config.caps, data,
2036 mconfig->formats_config.caps_size);
2037 --num_blocks;
2038 }
2039 }
2040
2041 return 0;
2042 }
2043
2044 static void skl_clear_pin_config(struct snd_soc_platform *platform,
2045 struct snd_soc_dapm_widget *w)
2046 {
2047 int i;
2048 struct skl_module_cfg *mconfig;
2049 struct skl_pipe *pipe;
2050
2051 if (!strncmp(w->dapm->component->name, platform->component.name,
2052 strlen(platform->component.name))) {
2053 mconfig = w->priv;
2054 pipe = mconfig->pipe;
2055 for (i = 0; i < mconfig->max_in_queue; i++) {
2056 mconfig->m_in_pin[i].in_use = false;
2057 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2058 }
2059 for (i = 0; i < mconfig->max_out_queue; i++) {
2060 mconfig->m_out_pin[i].in_use = false;
2061 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2062 }
2063 pipe->state = SKL_PIPE_INVALID;
2064 mconfig->m_state = SKL_MODULE_UNINIT;
2065 }
2066 }
2067
2068 void skl_cleanup_resources(struct skl *skl)
2069 {
2070 struct skl_sst *ctx = skl->skl_sst;
2071 struct snd_soc_platform *soc_platform = skl->platform;
2072 struct snd_soc_dapm_widget *w;
2073 struct snd_soc_card *card;
2074
2075 if (soc_platform == NULL)
2076 return;
2077
2078 card = soc_platform->component.card;
2079 if (!card || !card->instantiated)
2080 return;
2081
2082 skl->resource.mem = 0;
2083 skl->resource.mcps = 0;
2084
2085 list_for_each_entry(w, &card->widgets, list) {
2086 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2087 skl_clear_pin_config(soc_platform, w);
2088 }
2089
2090 skl_clear_module_cnt(ctx->dsp);
2091 }
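/*
 * Descriptive note: this only resets the driver-side bookkeeping (memory
 * and MCPS pools, per-pin and pipe state); the DSP firmware owns the real
 * resources. It lets pipelines be built again from a clean slate, e.g.
 * after the DSP/firmware has been torn down.
 */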
2092
2093 /*
2094 * Topology core widget load callback
2095 *
2096 * This is used to save the private data for each widget which gives
2097  * information to the driver about module and pipeline parameters that the
2098  * DSP FW expects, such as ids, resource values, formats, etc.
2099 */
2100 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
2101 struct snd_soc_dapm_widget *w,
2102 struct snd_soc_tplg_dapm_widget *tplg_w)
2103 {
2104 int ret;
2105 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2106 struct skl *skl = ebus_to_skl(ebus);
2107 struct hdac_bus *bus = ebus_to_hbus(ebus);
2108 struct skl_module_cfg *mconfig;
2109
2110 if (!tplg_w->priv.size)
2111 goto bind_event;
2112
2113 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2114
2115 if (!mconfig)
2116 return -ENOMEM;
2117
2118 w->priv = mconfig;
2119
2120 /*
2121 	 * module binary can be loaded later, so set it to be queried when the
2122 	 * module is loaded for a use case
2123 */
2124 mconfig->id.module_id = -1;
2125
2126 /* Parse private data for tuples */
2127 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2128 if (ret < 0)
2129 return ret;
2130 bind_event:
2131 if (tplg_w->event_type == 0) {
2132 dev_dbg(bus->dev, "ASoC: No event handler required\n");
2133 return 0;
2134 }
2135
2136 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2137 ARRAY_SIZE(skl_tplg_widget_ops),
2138 tplg_w->event_type);
2139
2140 if (ret) {
2141 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2142 __func__, tplg_w->event_type);
2143 return -EINVAL;
2144 }
2145
2146 return 0;
2147 }
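/*
 * Note: widgets that carry no private data (tplg_w->priv.size == 0) skip
 * the module config allocation entirely and only get an event handler
 * bound, if the topology requested one.
 */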
2148
2149 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2150 struct snd_soc_tplg_bytes_control *bc)
2151 {
2152 struct skl_algo_data *ac;
2153 struct skl_dfw_algo_data *dfw_ac =
2154 (struct skl_dfw_algo_data *)bc->priv.data;
2155
2156 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2157 if (!ac)
2158 return -ENOMEM;
2159
2160 /* Fill private data */
2161 ac->max = dfw_ac->max;
2162 ac->param_id = dfw_ac->param_id;
2163 ac->set_params = dfw_ac->set_params;
2164 ac->size = dfw_ac->max;
2165
2166 if (ac->max) {
2167 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2168 if (!ac->params)
2169 return -ENOMEM;
2170
2171 memcpy(ac->params, dfw_ac->params, ac->max);
2172 }
2173
2174 be->dobj.private = ac;
2175 return 0;
2176 }
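/*
 * The skl_algo_data built here is later retrieved through be->dobj.private
 * by the bytes-ext TLV handlers registered in skl_tlv_ops, which use
 * param_id/size/params to exchange module parameters with the DSP.
 */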
2177
2178 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2179 struct snd_kcontrol_new *kctl,
2180 struct snd_soc_tplg_ctl_hdr *hdr)
2181 {
2182 struct soc_bytes_ext *sb;
2183 struct snd_soc_tplg_bytes_control *tplg_bc;
2184 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2185 struct hdac_bus *bus = ebus_to_hbus(ebus);
2186
2187 switch (hdr->ops.info) {
2188 case SND_SOC_TPLG_CTL_BYTES:
2189 tplg_bc = container_of(hdr,
2190 struct snd_soc_tplg_bytes_control, hdr);
2191 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2192 sb = (struct soc_bytes_ext *)kctl->private_value;
2193 if (tplg_bc->priv.size)
2194 return skl_init_algo_data(
2195 bus->dev, sb, tplg_bc);
2196 }
2197 break;
2198
2199 default:
2200 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2201 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2202 break;
2203 }
2204
2205 return 0;
2206 }
2207
2208 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2209 struct snd_soc_tplg_vendor_string_elem *str_elem,
2210 struct skl_dfw_manifest *minfo)
2211 {
2212 int tkn_count = 0;
2213 static int ref_count;
2214
2215 switch (str_elem->token) {
2216 case SKL_TKN_STR_LIB_NAME:
2217 if (ref_count > minfo->lib_count - 1) {
2218 ref_count = 0;
2219 return -EINVAL;
2220 }
2221
2222 strncpy(minfo->lib[ref_count].name, str_elem->string,
2223 ARRAY_SIZE(minfo->lib[ref_count].name));
2224 ref_count++;
2225 tkn_count++;
2226 break;
2227
2228 default:
2229 dev_err(dev, "Not a string token %d", str_elem->token);
2230 break;
2231 }
2232
2233 return tkn_count;
2234 }
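/*
 * Note: ref_count above is static, so the library index persists across
 * successive SKL_TKN_STR_LIB_NAME tokens and is only reset when more
 * names arrive than lib_count allows.
 */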
2235
2236 static int skl_tplg_get_str_tkn(struct device *dev,
2237 struct snd_soc_tplg_vendor_array *array,
2238 struct skl_dfw_manifest *minfo)
2239 {
2240 int tkn_count = 0, ret;
2241 struct snd_soc_tplg_vendor_string_elem *str_elem;
2242
2243 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2244 while (tkn_count < array->num_elems) {
2245 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
2246 str_elem++;
2247
2248 if (ret < 0)
2249 return ret;
2250
2251 tkn_count = tkn_count + ret;
2252 }
2253
2254 return tkn_count;
2255 }
2256
2257 static int skl_tplg_get_int_tkn(struct device *dev,
2258 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2259 struct skl_dfw_manifest *minfo)
2260 {
2261 int tkn_count = 0;
2262
2263 switch (tkn_elem->token) {
2264 case SKL_TKN_U32_LIB_COUNT:
2265 minfo->lib_count = tkn_elem->value;
2266 tkn_count++;
2267 break;
2268
2269 default:
2270 dev_err(dev, "Not a manifest token %d", tkn_elem->token);
2271 return -EINVAL;
2272 }
2273
2274 return tkn_count;
2275 }
2276
2277 /*
2278 * Fill the manifest structure by parsing the tokens based on the
2279 * type.
2280 */
2281 static int skl_tplg_get_manifest_tkn(struct device *dev,
2282 char *pvt_data, struct skl_dfw_manifest *minfo,
2283 int block_size)
2284 {
2285 int tkn_count = 0, ret;
2286 int off = 0, tuple_size = 0;
2287 struct snd_soc_tplg_vendor_array *array;
2288 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2289
2290 if (block_size <= 0)
2291 return -EINVAL;
2292
2293 while (tuple_size < block_size) {
2294 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2295 off += array->size;
2296 switch (array->type) {
2297 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2298 ret = skl_tplg_get_str_tkn(dev, array, minfo);
2299
2300 if (ret < 0)
2301 return ret;
2302 tkn_count += ret;
2303
2304 tuple_size += tkn_count *
2305 sizeof(struct snd_soc_tplg_vendor_string_elem);
2306 continue;
2307
2308 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2309 			dev_warn(dev, "no uuid tokens for skl tplg manifest");
2310 continue;
2311
2312 default:
2313 tkn_elem = array->value;
2314 tkn_count = 0;
2315 break;
2316 }
2317
2318 while (tkn_count <= array->num_elems - 1) {
2319 ret = skl_tplg_get_int_tkn(dev,
2320 tkn_elem, minfo);
2321 if (ret < 0)
2322 return ret;
2323
2324 tkn_count = tkn_count + ret;
2325 tkn_elem++;
2326 tuple_size += tkn_count *
2327 sizeof(struct snd_soc_tplg_vendor_value_elem);
2328 break;
2329 }
2330 tkn_count = 0;
2331 }
2332
2333 return 0;
2334 }
2335
2336 /*
2337 * Parse manifest private data for tokens. The private data block is
2338  * preceded by descriptors for the type and size of the data block.
2339 */
2340 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
2341 struct device *dev, struct skl_dfw_manifest *minfo)
2342 {
2343 struct snd_soc_tplg_vendor_array *array;
2344 int num_blocks, block_size = 0, block_type, off = 0;
2345 char *data;
2346 int ret;
2347
2348 /* Read the NUM_DATA_BLOCKS descriptor */
2349 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2350 ret = skl_tplg_get_desc_blocks(dev, array);
2351 if (ret < 0)
2352 return ret;
2353 num_blocks = ret;
2354
2355 off += array->size;
2356 array = (struct snd_soc_tplg_vendor_array *)
2357 (manifest->priv.data + off);
2358
2359 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2360 while (num_blocks > 0) {
2361 ret = skl_tplg_get_desc_blocks(dev, array);
2362
2363 if (ret < 0)
2364 return ret;
2365 block_type = ret;
2366 off += array->size;
2367
2368 array = (struct snd_soc_tplg_vendor_array *)
2369 (manifest->priv.data + off);
2370
2371 ret = skl_tplg_get_desc_blocks(dev, array);
2372
2373 if (ret < 0)
2374 return ret;
2375 block_size = ret;
2376 off += array->size;
2377
2378 array = (struct snd_soc_tplg_vendor_array *)
2379 (manifest->priv.data + off);
2380
2381 data = (manifest->priv.data + off);
2382
2383 if (block_type == SKL_TYPE_TUPLE) {
2384 ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
2385 block_size);
2386
2387 if (ret < 0)
2388 return ret;
2389
2390 --num_blocks;
2391 } else {
2392 return -EINVAL;
2393 }
2394 }
2395
2396 return 0;
2397 }
2398
2399 static int skl_manifest_load(struct snd_soc_component *cmpnt,
2400 struct snd_soc_tplg_manifest *manifest)
2401 {
2402 struct skl_dfw_manifest *minfo;
2403 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2404 struct hdac_bus *bus = ebus_to_hbus(ebus);
2405 struct skl *skl = ebus_to_skl(ebus);
2406 int ret = 0;
2407
2408 /* proceed only if we have private data defined */
2409 if (manifest->priv.size == 0)
2410 return 0;
2411
2412 minfo = &skl->skl_sst->manifest;
2413
2414 skl_tplg_get_manifest_data(manifest, bus->dev, minfo);
2415
2416 if (minfo->lib_count > HDA_MAX_LIB) {
2417 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
2418 minfo->lib_count);
2419 ret = -EINVAL;
2420 }
2421
2422 return ret;
2423 }
2424
2425 static struct snd_soc_tplg_ops skl_tplg_ops = {
2426 .widget_load = skl_tplg_widget_load,
2427 .control_load = skl_tplg_control_load,
2428 .bytes_ext_ops = skl_tlv_ops,
2429 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
2430 .manifest = skl_manifest_load,
2431 };
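/*
 * The ASoC topology core invokes these callbacks while parsing the
 * topology firmware passed to snd_soc_tplg_component_load() in
 * skl_tplg_init() below: widget_load/control_load run per object,
 * manifest runs once for the file-level manifest.
 */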
2432
2433 /*
2434 * A pipe can have multiple modules, each of them will be a DAPM widget as
2435 * well. While managing a pipeline we need to get the list of all the
2436  * widgets in a pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
2437  * collects the SKL type widgets in that pipeline
2438 */
2439 static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2440 {
2441 struct snd_soc_dapm_widget *w;
2442 struct skl_module_cfg *mcfg = NULL;
2443 struct skl_pipe_module *p_module = NULL;
2444 struct skl_pipe *pipe;
2445
2446 list_for_each_entry(w, &platform->component.card->widgets, list) {
2447 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2448 mcfg = w->priv;
2449 pipe = mcfg->pipe;
2450
2451 p_module = devm_kzalloc(platform->dev,
2452 sizeof(*p_module), GFP_KERNEL);
2453 if (!p_module)
2454 return -ENOMEM;
2455
2456 p_module->w = w;
2457 list_add_tail(&p_module->node, &pipe->w_list);
2458 }
2459 }
2460
2461 return 0;
2462 }
2463
2464 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2465 {
2466 struct skl_pipe_module *w_module;
2467 struct snd_soc_dapm_widget *w;
2468 struct skl_module_cfg *mconfig;
2469 bool host_found = false, link_found = false;
2470
2471 list_for_each_entry(w_module, &pipe->w_list, node) {
2472 w = w_module->w;
2473 mconfig = w->priv;
2474
2475 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2476 host_found = true;
2477 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2478 link_found = true;
2479 }
2480
2481 if (host_found && link_found)
2482 pipe->passthru = true;
2483 else
2484 pipe->passthru = false;
2485 }
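/*
 * A pipe is marked passthru when it contains both a host (HDA host DMA)
 * module and a link-side module, i.e. data flows straight between host
 * and link within the same pipeline.
 */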
2486
2487 /* This will be read from topology manifest, currently defined here */
2488 #define SKL_MAX_MCPS 30000000
2489 #define SKL_FW_MAX_MEM 1000000
2490
2491 /*
2492 * SKL topology init routine
2493 */
2494 int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2495 {
2496 int ret;
2497 const struct firmware *fw;
2498 struct hdac_bus *bus = ebus_to_hbus(ebus);
2499 struct skl *skl = ebus_to_skl(ebus);
2500 struct skl_pipeline *ppl;
2501
2502 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
2503 if (ret < 0) {
2504 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
2505 skl->tplg_name, ret);
2506 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2507 if (ret < 0) {
2508 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2509 "dfw_sst.bin", ret);
2510 return ret;
2511 }
2512 }
2513
2514 /*
2515 * The complete tplg for SKL is loaded as index 0, we don't use
2516 * any other index
2517 */
2518 ret = snd_soc_tplg_component_load(&platform->component,
2519 &skl_tplg_ops, fw, 0);
2520 if (ret < 0) {
2521 dev_err(bus->dev, "tplg component load failed%d\n", ret);
2522 release_firmware(fw);
2523 return -EINVAL;
2524 }
2525
2526 skl->resource.max_mcps = SKL_MAX_MCPS;
2527 skl->resource.max_mem = SKL_FW_MAX_MEM;
2528
2529 skl->tplg = fw;
2530 ret = skl_tplg_create_pipe_widget_list(platform);
2531 if (ret < 0)
2532 return ret;
2533
2534 list_for_each_entry(ppl, &skl->ppl_list, node)
2535 skl_tplg_set_pipe_type(skl, ppl->pipe);
2536
2537 return 0;
2538 }
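/*
 * Usage sketch (illustrative, not part of this file): the platform
 * component probe is expected to call skl_tplg_init() once the HDA ext
 * bus is up, e.g.:
 *
 *	ret = skl_tplg_init(platform, ebus);
 *	if (ret < 0)
 *		dev_err(platform->dev, "topology init failed: %d\n", ret);
 */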