/*
 * mtk-afe-fe-dai.c  --  MediaTek AFE FE DAI operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "mtk-afe-fe-dai.h"
#include "mtk-base-afe.h"

#define AFE_BASE_END_OFFSET 8

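/*
 * Register access helpers: optional memif/irq register fields are described
 * with a negative register index.  These wrappers turn accesses to such
 * "absent" registers into no-ops so callers do not need to check first.
 */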
int mtk_regmap_update_bits(struct regmap *map, int reg, unsigned int mask,
			   unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_update_bits(map, reg, mask, val);
}

int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}

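/*
 * FE DAI startup: enable the memif agent, apply the platform's hardware
 * constraints (capture is constrained to at least three periods to absorb
 * the AFE's internal buffering), and acquire a dynamically allocated IRQ
 * if this memif does not already own one.
 */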
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = rtd->cpu_dai->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	/* enable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       0 << memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
	 * smaller than period_size due to AFE's internal buffer.
	 * This easily leads to overrun when avail_min is period_size.
	 * One more period can hold the possible unread buffer.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamically allocate an IRQ for this memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);

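/*
 * FE DAI shutdown: disable the memif agent and, unless the memif uses a
 * fixed IRQ, release the dynamically acquired IRQ and detach the substream.
 */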
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int irq_id;

	irq_id = memif->irq_usage;

	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       1 << memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);

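/*
 * hw_params: allocate the DMA buffer and program the memif with its physical
 * base/end addresses, the 33rd address bit (for buffers above 4 GiB), the
 * mono/stereo flag and the sampling-rate field.
 */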
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int msb_at_bit33 = 0;
	int ret, fs = 0;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
	memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
	memif->buffer_size = substream->runtime->dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 memif->phys_buf_addr);
	/* end */
	mtk_regmap_write(afe->regmap,
			 memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
			 memif->phys_buf_addr + memif->buffer_size - 1);

	/* set MSB to 33-bit */
	mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
			       1 << memif->data->msb_shift,
			       msb_at_bit33 << memif->data->msb_shift);

	/* set channel */
	if (memif->data->mono_shift >= 0) {
		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				       1 << memif->data->mono_shift,
				       mono << memif->data->mono_shift);
	}

	/* set rate */
	if (memif->data->fs_shift < 0)
		return 0;

	fs = afe->memif_fs(substream, params_rate(params));

	if (fs < 0)
		return -EINVAL;

	mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
			       memif->data->fs_maskbit << memif->data->fs_shift,
			       fs << memif->data->fs_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);

int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);

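/*
 * Trigger: on START/RESUME enable the memif, program the IRQ period counter
 * and IRQ rate, then unmask the interrupt; on STOP/SUSPEND disable the
 * memif, mask the interrupt and clear any pending IRQ status.
 */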
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (memif->data->enable_shift >= 0)
			mtk_regmap_update_bits(afe->regmap,
					       memif->data->enable_reg,
					       1 << memif->data->enable_shift,
					       1 << memif->data->enable_shift);

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit
				       << irq_data->irq_cnt_shift,
				       counter << irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);

		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit
				       << irq_data->irq_fs_shift,
				       fs << irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       1 << irq_data->irq_en_shift);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				       1 << memif->data->enable_shift, 0);
		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       0 << irq_data->irq_en_shift);
		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return 0;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);

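/*
 * Prepare: select normal (16-bit) or HD (24/32-bit) memif mode according to
 * the runtime sample format.
 */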
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int hd_audio = 0;

	/* set hd mode */
	switch (substream->runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		hd_audio = 1;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, substream->runtime->format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       1 << memif->data->hd_shift,
			       hd_audio << memif->data->hd_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);

const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup = mtk_afe_fe_startup,
	.shutdown = mtk_afe_fe_shutdown,
	.hw_params = mtk_afe_fe_hw_params,
	.hw_free = mtk_afe_fe_hw_free,
	.prepare = mtk_afe_fe_prepare,
	.trigger = mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);

static DEFINE_MUTEX(irqs_lock);
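/*
 * Simple allocator for the AFE's shared interrupt lines: return the index of
 * the first free entry, or afe->irqs_size when none is left (callers treat
 * that value as an allocation failure).
 */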
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);

int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);

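/*
 * Suspend: back up the registers listed in reg_back_up_list before powering
 * the AFE down, so mtk_afe_dai_resume() can restore them.
 */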
int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	if (!afe->reg_back_up)
		return -ENOMEM;

	for (i = 0; i < afe->reg_back_up_list_num; i++)
		regmap_read(regmap, afe->reg_back_up_list[i],
			    &afe->reg_back_up[i]);

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);

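/*
 * Resume: power the AFE back up and restore the registers saved by
 * mtk_afe_dai_suspend().
 */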
int mtk_afe_dai_resume(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i = 0;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	if (!afe->reg_back_up) {
		dev_dbg(dev, "%s no reg_backup\n", __func__);
	} else {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);
	}

	afe->suspended = false;
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);

MODULE_DESCRIPTION("MediaTek simple FE DAI operator");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");