ALSA: hda - Implement unbind more safely
[deliverable/linux.git] / sound / pci / hda / hda_controller.c
1 /*
2 *
3 * Implementation of primary alsa driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_controller.h"
34
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
37
/* DSP lock helpers: serialize stream usage against the DSP firmware loader */
#ifdef CONFIG_SND_HDA_DSP_LOADER
/* real locking: each azx_dev carries a dsp_mutex and a 'locked' flag */
#define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
#define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
#define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
#define dsp_is_locked(dev) ((dev)->locked)
#else
/* no DSP loader configured: all of this compiles away to nothing */
#define dsp_lock_init(dev) do {} while (0)
#define dsp_lock(dev) do {} while (0)
#define dsp_unlock(dev) do {} while (0)
#define dsp_is_locked(dev) 0
#endif
50
51 /*
52 * AZX stream operations.
53 */
54
55 /* start a stream */
56 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
57 {
58 /*
59 * Before stream start, initialize parameter
60 */
61 azx_dev->insufficient = 1;
62
63 /* enable SIE */
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
70 }
71
72 /* stop DMA */
73 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
74 {
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
79 }
80
81 /* stop a stream */
82 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
83 {
84 azx_stream_clear(chip, azx_dev);
85 /* disable SIE */
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
88 }
89 EXPORT_SYMBOL_GPL(azx_stream_stop);
90
/* reset a stream engine via the SD_CTL stream-reset bit */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned char val;
	int timeout;

	/* make sure DMA is stopped before asserting reset */
	azx_stream_clear(chip, azx_dev);

	/* assert the stream reset bit */
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_STREAM_RESET);
	udelay(3);
	timeout = 300;
	/* bounded poll until the hardware reports the reset as asserted */
	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		 SD_CTL_STREAM_RESET) && --timeout)
		;
	/* deassert the reset bit again */
	val &= ~SD_CTL_STREAM_RESET;
	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
	udelay(3);

	timeout = 300;
	/* waiting for hardware to report that the stream is out of reset */
	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		SD_CTL_STREAM_RESET) && --timeout)
		;

	/* reset first position - may not be synced with hw at this time */
	*azx_dev->posbuf = 0;
}
120
/*
 * Program the stream descriptor (SD) registers for streaming:
 * stream tag, cyclic buffer length, format, and BDL address,
 * then enable the per-descriptor interrupts.  Always returns 0.
 */
static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned int val;
	/* make sure the run bit is zero for SD */
	azx_stream_clear(chip, azx_dev);
	/* program the stream_tag */
	val = azx_sd_readl(chip, azx_dev, SD_CTL);
	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
	/* non-snoop mode needs the traffic-priority bit set */
	if (!azx_snoop(chip))
		val |= SD_CTL_TRAFFIC_PRIO;
	azx_sd_writel(chip, azx_dev, SD_CTL, val);

	/* program the length of samples in cyclic buffer */
	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);

	/* program the stream format */
	/* this value needs to be the same as the one programmed */
	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);

	/* program the stream LVI (last valid index) of the BDL */
	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);

	/* program the BDL address */
	/* lower BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
	/* upper BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPU,
		      upper_32_bits(azx_dev->bdl.addr));

	/* enable the position buffer, unless LPIB is used for both
	 * playback and capture position reporting
	 */
	if (chip->get_position[0] != azx_get_pos_lpib ||
	    chip->get_position[1] != azx_get_pos_lpib) {
		if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
			azx_writel(chip, DPLBASE,
				(u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
	}

	/* set the interrupt enable bits in the descriptor control register */
	azx_sd_writel(chip, azx_dev, SD_CTL,
		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);

	return 0;
}
168
169 /* assign a stream for the PCM */
170 static inline struct azx_dev *
171 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
172 {
173 int dev, i, nums;
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
178
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
182 } else {
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
185 }
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
188 dsp_lock(azx_dev);
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
190 if (azx_dev->assigned_key == key) {
191 azx_dev->opened = 1;
192 azx_dev->assigned_key = key;
193 dsp_unlock(azx_dev);
194 return azx_dev;
195 }
196 if (!res ||
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
198 res = azx_dev;
199 }
200 dsp_unlock(azx_dev);
201 }
202 if (res) {
203 dsp_lock(res);
204 res->opened = 1;
205 res->assigned_key = key;
206 dsp_unlock(res);
207 }
208 return res;
209 }
210
/* release the assigned stream so azx_assign_device() can hand it out again */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	/* assigned_key is deliberately kept for re-open affinity */
	azx_dev->opened = 0;
}
216
217 static cycle_t azx_cc_read(const struct cyclecounter *cc)
218 {
219 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
220 struct snd_pcm_substream *substream = azx_dev->substream;
221 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
222 struct azx *chip = apcm->chip;
223
224 return azx_readl(chip, WALLCLK);
225 }
226
/*
 * (Re)initialize the timecounter on top of the 24 MHz wall clock.
 * @force together with @last is used for synchronized starts so that
 * all grouped streams share the same start cycle.
 */
static void azx_timecounter_init(struct snd_pcm_substream *substream,
				bool force, cycle_t last)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct timecounter *tc = &azx_dev->azx_tc;
	struct cyclecounter *cc = &azx_dev->azx_cc;
	u64 nsec;

	cc->read = azx_cc_read;
	cc->mask = CLOCKSOURCE_MASK(32);

	/*
	 * Converting from 24 MHz to ns means applying a 125/3 factor.
	 * To avoid any saturation issues in intermediate operations,
	 * the 125 factor is applied first. The division is applied
	 * last after reading the timecounter value.
	 * Applying the 1/3 factor as part of the multiplication
	 * requires at least 20 bits for a decent precision, however
	 * overflows occur after about 4 hours or less, not a option.
	 */

	cc->mult = 125; /* saturation after 195 years */
	cc->shift = 0;

	nsec = 0; /* audio time is elapsed time since trigger */
	timecounter_init(tc, cc, nsec);
	if (force)
		/*
		 * force timecounter to use predefined value,
		 * used for synchronized starts
		 */
		tc->cycle_last = last;
}
260
261 static inline struct hda_pcm_stream *
262 to_hda_pcm_stream(struct snd_pcm_substream *substream)
263 {
264 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
265 return &apcm->info->stream[substream->stream];
266 }
267
268 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
269 u64 nsec)
270 {
271 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
272 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
273 u64 codec_frames, codec_nsecs;
274
275 if (!hinfo->ops.get_delay)
276 return nsec;
277
278 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
279 codec_nsecs = div_u64(codec_frames * 1000000000LL,
280 substream->runtime->rate);
281
282 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
283 return nsec + codec_nsecs;
284
285 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
286 }
287
/*
 * Set up BDL entries covering @size bytes of @dmab starting at @ofs.
 * Advances *bdlp past the entries written; returns the new offset,
 * or -EINVAL when AZX_MAX_BDL_ENTRIES would be exceeded.
 */
static int setup_bdle(struct azx *chip,
		      struct snd_dma_buffer *dmab,
		      struct azx_dev *azx_dev, u32 **bdlp,
		      int ofs, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	while (size > 0) {
		dma_addr_t addr;
		int chunk;

		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
			return -EINVAL;

		addr = snd_sgbuf_get_addr(dmab, ofs);
		/* program the address field of the BDL entry */
		bdl[0] = cpu_to_le32((u32)addr);
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		/* program the size field of the BDL entry */
		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
		/* one BDLE cannot cross 4K boundary on CTHDA chips */
		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
			u32 remain = 0x1000 - (ofs & 0xfff);
			if (chunk > remain)
				chunk = remain;
		}
		bdl[2] = cpu_to_le32(chunk);
		/* program the IOC to enable interrupt
		 * only when the whole fragment is processed
		 */
		size -= chunk;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
		bdl += 4;
		azx_dev->frags++;
		ofs += chunk;
	}
	*bdlp = bdl;
	return ofs;
}
330
/*
 * Program BDL entries for the whole cyclic buffer, one per period.
 * An optional position adjustment (bdl_pos_adj) splits off a small
 * leading chunk to compensate for DMA position reporting latency.
 */
static int azx_setup_periods(struct azx *chip,
			     struct snd_pcm_substream *substream,
			     struct azx_dev *azx_dev)
{
	u32 *bdl;
	int i, ofs, periods, period_bytes;
	int pos_adj = 0;

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	period_bytes = azx_dev->period_bytes;
	periods = azx_dev->bufsize / period_bytes;

	/* program the initial BDL entries */
	bdl = (u32 *)azx_dev->bdl.area;
	ofs = 0;
	azx_dev->frags = 0;

	if (chip->bdl_pos_adj)
		pos_adj = chip->bdl_pos_adj[chip->dev_index];
	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
		struct snd_pcm_runtime *runtime = substream->runtime;
		int pos_align = pos_adj;
		/* scale from 48 kHz frames to the actual rate, rounding up,
		 * then round up to the alignment unit
		 */
		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
		if (!pos_adj)
			pos_adj = pos_align;
		else
			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
				pos_align;
		pos_adj = frames_to_bytes(runtime, pos_adj);
		if (pos_adj >= period_bytes) {
			dev_warn(chip->card->dev,"Too big adjustment %d\n",
				 pos_adj);
			pos_adj = 0;
		} else {
			/* leading chunk gets an IOC so the first wakeup
			 * arrives early
			 */
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev,
					 &bdl, ofs, pos_adj, true);
			if (ofs < 0)
				goto error;
		}
	} else
		pos_adj = 0;

	for (i = 0; i < periods; i++) {
		/* the last period is shortened by the leading chunk */
		if (i == periods - 1 && pos_adj)
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes - pos_adj, 0);
		else
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes,
					 !azx_dev->no_period_wakeup);
		if (ofs < 0)
			goto error;
	}
	return 0;

 error:
	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
		azx_dev->bufsize, period_bytes);
	return -EINVAL;
}
400
401 /*
402 * PCM ops
403 */
404
/* close PCM op: detach the hw stream and release the codec resources */
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	/* clear under reg_lock so the irq handler never sees a stale
	 * substream pointer
	 */
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = NULL;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	azx_release_device(azx_dev);
	if (hinfo->ops.close)
		hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	/* drop the PCM refcount taken in azx_pcm_open() */
	snd_hda_codec_pcm_put(apcm->info);
	return 0;
}
426
427 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
428 struct snd_pcm_hw_params *hw_params)
429 {
430 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
431 struct azx *chip = apcm->chip;
432 int ret;
433
434 dsp_lock(get_azx_dev(substream));
435 if (dsp_is_locked(get_azx_dev(substream))) {
436 ret = -EBUSY;
437 goto unlock;
438 }
439
440 ret = chip->ops->substream_alloc_pages(chip, substream,
441 params_buffer_bytes(hw_params));
442 unlock:
443 dsp_unlock(get_azx_dev(substream));
444 return ret;
445 }
446
/* hw_free PCM op: reset stream registers and free the buffer pages */
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx *chip = apcm->chip;
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	/* skip the hw teardown while the DSP loader owns the stream */
	if (!dsp_is_locked(azx_dev)) {
		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
		azx_dev->bufsize = 0;
		azx_dev->period_bytes = 0;
		azx_dev->format_val = 0;
	}

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = chip->ops->substream_free_pages(chip, substream);
	azx_dev->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}
473
/*
 * prepare PCM op: program the stream format, BDL and controller
 * registers, then let the codec prepare its side of the link.
 *
 * Returns -EBUSY while the DSP loader holds the stream, -EINVAL for
 * an unsupported format, or the codec-prepare result.
 */
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	azx_stream_reset(chip, azx_dev);
	format_val = snd_hda_calc_stream_format(apcm->codec,
						runtime->rate,
						runtime->channels,
						runtime->format,
						hinfo->maxbps,
						ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	bufsize = snd_pcm_lib_buffer_bytes(substream);
	period_bytes = snd_pcm_lib_period_bytes(substream);

	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
		bufsize, format_val);

	/* reprogram the BDL only when some stream parameter changed */
	if (bufsize != azx_dev->bufsize ||
	    period_bytes != azx_dev->period_bytes ||
	    format_val != azx_dev->format_val ||
	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
		azx_dev->bufsize = bufsize;
		azx_dev->period_bytes = period_bytes;
		azx_dev->format_val = format_val;
		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
		err = azx_setup_periods(chip, substream, azx_dev);
		if (err < 0)
			goto unlock;
	}

	/* when LPIB delay correction gives a small negative value,
	 * we ignore it; currently set the threshold statically to
	 * 64 frames
	 */
	if (runtime->period_size > 64)
		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
	else
		azx_dev->delay_negative_threshold = 0;

	/* wallclk has 24Mhz clock source */
	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
						runtime->rate) * 1000);
	azx_setup_controller(chip, azx_dev);
	/* the hw reports the FIFO size only for playback streams */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		azx_dev->fifo_size =
			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
	else
		azx_dev->fifo_size = 0;

	stream_tag = azx_dev->stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->format_val, substream);

 unlock:
	if (!err)
		azx_dev->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}
560
/*
 * trigger PCM op: start/stop all streams linked to this substream.
 *
 * Streams are started/stopped together via the SSYNC register so
 * grouped substreams stay sample-synchronous; the function then
 * busy-waits (bounded) for the FIFOs to become ready (start) or the
 * RUN bits to clear (stop) before releasing the SYNC bits.
 */
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	int rstart = 0, start, nsync = 0, sbits = 0;
	int nwait, timeout;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rstart = 1;
		/* fall through */
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = 1;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = 0;
		break;
	default:
		return -EINVAL;
	}

	/* collect the linked streams belonging to this card */
	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->index;
		nsync++;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&chip->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) | sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
			/* resumed streams keep their elapsed wallclock */
			if (!rstart)
				azx_dev->start_wallclk -=
					azx_dev->period_wallclk;
			azx_stream_start(chip, azx_dev);
		} else {
			azx_stream_stop(chip, azx_dev);
		}
		azx_dev->running = start;
	}
	spin_unlock(&chip->reg_lock);
	if (start) {
		/* wait until all FIFOs get ready */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
				      SD_STS_FIFO_READY))
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	} else {
		/* wait until all RUN bits are cleared */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (azx_sd_readb(chip, azx_dev, SD_CTL) &
				    SD_CTL_DMA_START)
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	}
	spin_lock(&chip->reg_lock);
	/* reset SYNC bits */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) & ~sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
	if (start) {
		azx_timecounter_init(substream, 0, 0);
		snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
		substream->runtime->trigger_tstamp_latched = true;

		if (nsync > 1) {
			cycle_t cycle_last;

			/* same start cycle for master and group */
			azx_dev = get_azx_dev(substream);
			cycle_last = azx_dev->azx_tc.cycle_last;

			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_timecounter_init(s, 1, cycle_last);
			}
		}
	}
	spin_unlock(&chip->reg_lock);
	return 0;
}
688
/* read the current stream position from the LPIB register */
unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
{
	return azx_sd_readl(chip, azx_dev, SD_LPIB);
}
EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
694
/* read the current stream position from the DMA position buffer */
unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
{
	return le32_to_cpu(*azx_dev->posbuf);
}
EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
700
/*
 * Return the current DMA position in bytes, wrapped to the buffer
 * size, and update runtime->delay from the chip- and codec-side
 * delay callbacks when available.
 */
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev)
{
	struct snd_pcm_substream *substream = azx_dev->substream;
	unsigned int pos;
	int stream = substream->stream;
	int delay = 0;

	if (chip->get_position[stream])
		pos = chip->get_position[stream](chip, azx_dev);
	else /* use the position buffer as default */
		pos = azx_get_pos_posbuf(chip, azx_dev);

	/* hardware may momentarily report a position out of range */
	if (pos >= azx_dev->bufsize)
		pos = 0;

	if (substream->runtime) {
		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

		if (chip->get_delay[stream])
			delay += chip->get_delay[stream](chip, azx_dev, pos);
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);
733
734 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
735 {
736 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
737 struct azx *chip = apcm->chip;
738 struct azx_dev *azx_dev = get_azx_dev(substream);
739 return bytes_to_frames(substream->runtime,
740 azx_get_position(chip, azx_dev));
741 }
742
/*
 * get_time_info PCM op: report a link timestamp derived from the
 * 24 MHz wall clock when requested and supported; otherwise fall
 * back to the default system timestamp type.  Always returns 0.
 */
static int azx_get_time_info(struct snd_pcm_substream *substream,
			struct timespec *system_ts, struct timespec *audio_ts,
			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		/* apply the remaining 1/3 of the 125/3 conversion factor */
		nsec = timecounter_read(&azx_dev->azx_tc);
		nsec = div_u64(nsec, 3); /* can be optimized */
		if (audio_tstamp_config->report_delay)
			nsec = azx_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

	} else
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;

	return 0;
}
772
/* baseline hw description; refined from the codec caps in azx_pcm_open() */
static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
798
/*
 * open PCM op: assign a hw stream, build the runtime hw description
 * from the codec capabilities and apply the buffer constraints.
 *
 * Returns -EBUSY when no stream engine is free, -ENODEV/-EINVAL on
 * codec open failures; error paths undo the refcount and power-up.
 */
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	int err;
	int buff_step;

	/* hold a PCM refcount across open; dropped on error or in close */
	snd_hda_codec_pcm_get(apcm->info);
	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	if (azx_dev == NULL) {
		err = -EBUSY;
		goto unlock;
	}
	runtime->hw = azx_pcm_hw;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up(apcm->codec);
	if (hinfo->ops.open)
		err = hinfo->ops.open(hinfo, apcm->codec, substream);
	else
		err = -ENODEV;
	if (err < 0) {
		azx_release_device(azx_dev);
		goto powerdown;
	}
	/* re-limit: ops.open may have changed the rate set */
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		if (hinfo->ops.close)
			hinfo->ops.close(hinfo, apcm->codec, substream);
		err = -EINVAL;
		goto powerdown;
	}

	/* disable LINK_ATIME timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
	}

	/* publish the substream to the irq handler under reg_lock */
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = substream;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);

	runtime->private_data = azx_dev;
	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;

 powerdown:
	snd_hda_power_down(apcm->codec);
 unlock:
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return err;
}
895
896 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
897 struct vm_area_struct *area)
898 {
899 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
900 struct azx *chip = apcm->chip;
901 if (chip->ops->pcm_mmap_prepare)
902 chip->ops->pcm_mmap_prepare(substream, area);
903 return snd_pcm_lib_default_mmap(substream, area);
904 }
905
/* PCM operations table shared by all azx streams */
static struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.get_time_info =  azx_get_time_info,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};
919
920 static void azx_pcm_free(struct snd_pcm *pcm)
921 {
922 struct azx_pcm *apcm = pcm->private_data;
923 if (apcm) {
924 list_del(&apcm->list);
925 apcm->info->pcm = NULL;
926 kfree(apcm);
927 }
928 }
929
/* upper bound for per-stream buffer preallocation */
#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)

/*
 * Create a snd_pcm for the given codec PCM info, attach the azx ops
 * and preallocate buffer pages.  Returns -EBUSY if the PCM device
 * number is already taken.
 */
static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
				 struct hda_pcm *cpcm)
{
	struct azx *chip = bus->private_data;
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL)
		return -ENOMEM;
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	apcm->info = cpcm;
	pcm->private_data = apcm;
	/* azx_pcm_free() unlinks the wrapper when the pcm is destroyed */
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	return 0;
}
982
983 /*
984 * CORB / RIRB interface
985 */
/* allocate the DMA page shared by the CORB and RIRB ring buffers */
static int azx_alloc_cmd_io(struct azx *chip)
{
	/* single page (at least 4096 bytes) must suffice for both ring buffers */
	return chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
					  PAGE_SIZE, &chip->rb);
}
EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
993
/*
 * Program and enable the CORB (command outbound) and RIRB (response
 * inbound) ring buffers.  The CORB occupies the first half of the
 * shared DMA page, the RIRB the second half (offset 2048).
 */
static void azx_init_cmd_io(struct azx *chip)
{
	int timeout;

	spin_lock_irq(&chip->reg_lock);
	/* CORB set up */
	chip->corb.addr = chip->rb.addr;
	chip->corb.buf = (u32 *)chip->rb.area;
	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));

	/* set the corb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, CORBSIZE, 0x02);
	/* set the corb write pointer to 0 */
	azx_writew(chip, CORBWP, 0);

	/* reset the corb hw read pointer */
	azx_writew(chip, CORBRP, AZX_CORBRP_RST);
	/* some chips clear the reset bit themselves; skip the handshake there */
	if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
		/* wait for the reset bit to read back as set */
		for (timeout = 1000; timeout > 0; timeout--) {
			if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
				break;
			udelay(1);
		}
		if (timeout <= 0)
			dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
				azx_readw(chip, CORBRP));

		/* then clear it again and wait for it to read back as 0 */
		azx_writew(chip, CORBRP, 0);
		for (timeout = 1000; timeout > 0; timeout--) {
			if (azx_readw(chip, CORBRP) == 0)
				break;
			udelay(1);
		}
		if (timeout <= 0)
			dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
				azx_readw(chip, CORBRP));
	}

	/* enable corb dma */
	azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);

	/* RIRB set up */
	chip->rirb.addr = chip->rb.addr + 2048;
	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
	chip->rirb.wp = chip->rirb.rp = 0;
	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));

	/* set the rirb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, RIRBSIZE, 0x02);
	/* reset the rirb hw write pointer */
	azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
	/* set N=1, get RIRB response interrupt for new entry */
	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
		azx_writew(chip, RINTCNT, 0xc0);
	else
		azx_writew(chip, RINTCNT, 1);
	/* enable rirb dma and response irq */
	azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
	spin_unlock_irq(&chip->reg_lock);
}
EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1058
/*
 * Stop the CORB/RIRB DMA engines.  The shared DMA page itself is
 * released later by azx_free_stream_pages().
 */
static void azx_free_cmd_io(struct azx *chip)
{
	spin_lock_irq(&chip->reg_lock);
	/* disable ringbuffer DMAs */
	azx_writeb(chip, RIRBCTL, 0);
	azx_writeb(chip, CORBCTL, 0);
	spin_unlock_irq(&chip->reg_lock);
}
EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1068
1069 static unsigned int azx_command_addr(u32 cmd)
1070 {
1071 unsigned int addr = cmd >> 28;
1072
1073 if (addr >= AZX_MAX_CODECS) {
1074 snd_BUG();
1075 addr = 0;
1076 }
1077
1078 return addr;
1079 }
1080
/* send a command via the CORB ring buffer; returns 0, -EIO when the
 * controller looks dead, or -EAGAIN when the ring is full
 */
static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
{
	struct azx *chip = bus->private_data;
	unsigned int addr = azx_command_addr(val);
	unsigned int wp, rp;

	spin_lock_irq(&chip->reg_lock);

	/* add command to corb */
	wp = azx_readw(chip, CORBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		spin_unlock_irq(&chip->reg_lock);
		return -EIO;
	}
	/* advance to the next free slot (the ring wraps) */
	wp++;
	wp %= AZX_MAX_CORB_ENTRIES;

	rp = azx_readw(chip, CORBRP);
	if (wp == rp) {
		/* oops, it's full */
		spin_unlock_irq(&chip->reg_lock);
		return -EAGAIN;
	}

	/* account the outstanding command before kicking the hardware */
	chip->rirb.cmds[addr]++;
	chip->corb.buf[wp] = cpu_to_le32(val);
	azx_writew(chip, CORBWP, wp);

	spin_unlock_irq(&chip->reg_lock);

	return 0;
}
1115
/* res_ex flag bit marking an unsolicited response entry */
#define AZX_RIRB_EX_UNSOL_EV (1<<4)

/* retrieve RIRB entry - called from interrupt handler */
static void azx_update_rirb(struct azx *chip)
{
	unsigned int rp, wp;
	unsigned int addr;
	u32 res, res_ex;

	wp = azx_readw(chip, RIRBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		return;
	}

	if (wp == chip->rirb.wp)
		return;
	chip->rirb.wp = wp;

	/* consume every new entry between the cached read pointer and wp */
	while (chip->rirb.rp != wp) {
		chip->rirb.rp++;
		chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;

		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
		res = le32_to_cpu(chip->rirb.buf[rp]);
		/* low nibble of res_ex carries the responding codec address */
		addr = res_ex & 0xf;
		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
				res, res_ex,
				chip->rirb.rp, wp);
			snd_BUG();
		} else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
		else if (chip->rirb.cmds[addr]) {
			/* publish the result, then decrement the pending
			 * counter the waiter in azx_rirb_get_response() spins on
			 */
			chip->rirb.res[addr] = res;
			smp_wmb();
			chip->rirb.cmds[addr]--;
		} else if (printk_ratelimit()) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
				res, res_ex,
				chip->last_cmd[addr]);
		}
	}
}
1161
1162 /* receive a response */
1163 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1164 unsigned int addr)
1165 {
1166 struct azx *chip = bus->private_data;
1167 unsigned long timeout;
1168 unsigned long loopcounter;
1169 int do_poll = 0;
1170
1171 again:
1172 timeout = jiffies + msecs_to_jiffies(1000);
1173
1174 for (loopcounter = 0;; loopcounter++) {
1175 if (chip->polling_mode || do_poll) {
1176 spin_lock_irq(&chip->reg_lock);
1177 azx_update_rirb(chip);
1178 spin_unlock_irq(&chip->reg_lock);
1179 }
1180 if (!chip->rirb.cmds[addr]) {
1181 smp_rmb();
1182 bus->rirb_error = 0;
1183
1184 if (!do_poll)
1185 chip->poll_count = 0;
1186 return chip->rirb.res[addr]; /* the last value */
1187 }
1188 if (time_after(jiffies, timeout))
1189 break;
1190 if (bus->needs_damn_long_delay || loopcounter > 3000)
1191 msleep(2); /* temporary workaround */
1192 else {
1193 udelay(10);
1194 cond_resched();
1195 }
1196 }
1197
1198 if (!bus->no_response_fallback)
1199 return -1;
1200
1201 if (!chip->polling_mode && chip->poll_count < 2) {
1202 dev_dbg(chip->card->dev,
1203 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1204 chip->last_cmd[addr]);
1205 do_poll = 1;
1206 chip->poll_count++;
1207 goto again;
1208 }
1209
1210
1211 if (!chip->polling_mode) {
1212 dev_warn(chip->card->dev,
1213 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1214 chip->last_cmd[addr]);
1215 chip->polling_mode = 1;
1216 goto again;
1217 }
1218
1219 if (chip->msi) {
1220 dev_warn(chip->card->dev,
1221 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1222 chip->last_cmd[addr]);
1223 if (chip->ops->disable_msi_reset_irq(chip) &&
1224 chip->ops->disable_msi_reset_irq(chip) < 0) {
1225 bus->rirb_error = 1;
1226 return -1;
1227 }
1228 goto again;
1229 }
1230
1231 if (chip->probing) {
1232 /* If this critical timeout happens during the codec probing
1233 * phase, this is likely an access to a non-existing codec
1234 * slot. Better to return an error and reset the system.
1235 */
1236 return -1;
1237 }
1238
1239 /* a fatal communication error; need either to reset or to fallback
1240 * to the single_cmd mode
1241 */
1242 bus->rirb_error = 1;
1243 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1244 bus->response_reset = 1;
1245 return -1; /* give a chance to retry */
1246 }
1247
1248 dev_err(chip->card->dev,
1249 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1250 chip->last_cmd[addr]);
1251 chip->single_cmd = 1;
1252 bus->response_reset = 0;
1253 /* release CORB/RIRB */
1254 azx_free_cmd_io(chip);
1255 /* disable unsolicited responses */
1256 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1257 return -1;
1258 }
1259
1260 /*
1261 * Use the single immediate command instead of CORB/RIRB for simplicity
1262 *
1263 * Note: according to Intel, this is not preferred use. The command was
1264 * intended for the BIOS only, and may get confused with unsolicited
1265 * responses. So, we shouldn't use it for normal operation from the
1266 * driver.
1267 * I left the codes, however, for debugging/testing purposes.
1268 */
1269
1270 /* receive a response */
1271 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1272 {
1273 int timeout = 50;
1274
1275 while (timeout--) {
1276 /* check IRV busy bit */
1277 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1278 /* reuse rirb.res as the response return value */
1279 chip->rirb.res[addr] = azx_readl(chip, IR);
1280 return 0;
1281 }
1282 udelay(1);
1283 }
1284 if (printk_ratelimit())
1285 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1286 azx_readw(chip, IRS));
1287 chip->rirb.res[addr] = -1;
1288 return -EIO;
1289 }
1290
1291 /* send a command */
1292 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1293 {
1294 struct azx *chip = bus->private_data;
1295 unsigned int addr = azx_command_addr(val);
1296 int timeout = 50;
1297
1298 bus->rirb_error = 0;
1299 while (timeout--) {
1300 /* check ICB busy bit */
1301 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1302 /* Clear IRV valid bit */
1303 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1304 AZX_IRS_VALID);
1305 azx_writel(chip, IC, val);
1306 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1307 AZX_IRS_BUSY);
1308 return azx_single_wait_for_response(chip, addr);
1309 }
1310 udelay(1);
1311 }
1312 if (printk_ratelimit())
1313 dev_dbg(chip->card->dev,
1314 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1315 azx_readw(chip, IRS), val);
1316 return -EIO;
1317 }
1318
1319 /* receive a response */
1320 static unsigned int azx_single_get_response(struct hda_bus *bus,
1321 unsigned int addr)
1322 {
1323 struct azx *chip = bus->private_data;
1324 return chip->rirb.res[addr];
1325 }
1326
1327 /*
1328 * The below are the main callbacks from hda_codec.
1329 *
1330 * They are just the skeleton to call sub-callbacks according to the
1331 * current setting of chip->single_cmd.
1332 */
1333
1334 /* send a command */
1335 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1336 {
1337 struct azx *chip = bus->private_data;
1338
1339 if (chip->disabled)
1340 return 0;
1341 chip->last_cmd[azx_command_addr(val)] = val;
1342 if (chip->single_cmd)
1343 return azx_single_send_cmd(bus, val);
1344 else
1345 return azx_corb_send_cmd(bus, val);
1346 }
1347 EXPORT_SYMBOL_GPL(azx_send_cmd);
1348
1349 /* get a response */
1350 static unsigned int azx_get_response(struct hda_bus *bus,
1351 unsigned int addr)
1352 {
1353 struct azx *chip = bus->private_data;
1354 if (chip->disabled)
1355 return 0;
1356 if (chip->single_cmd)
1357 return azx_single_get_response(bus, addr);
1358 else
1359 return azx_rirb_get_response(bus, addr);
1360 }
1361 EXPORT_SYMBOL_GPL(azx_get_response);
1362
1363 #ifdef CONFIG_SND_HDA_DSP_LOADER
1364 /*
1365 * DSP loading code (e.g. for CA0132)
1366 */
1367
1368 /* use the first stream for loading DSP */
1369 static struct azx_dev *
1370 azx_get_dsp_loader_dev(struct azx *chip)
1371 {
1372 return &chip->azx_dev[chip->playback_index_offset];
1373 }
1374
/*
 * Prepare the first playback stream for a DSP firmware download.
 *
 * Saves the stream state, marks it locked, allocates the transfer buffer
 * into *bufp and programs the BDL for one single fragment.  Returns the
 * stream tag on success or a negative error code.  The stream state is
 * restored later by azx_load_dsp_cleanup().
 */
static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
				unsigned int byte_size,
				struct snd_dma_buffer *bufp)
{
	u32 *bdl;
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);

	dsp_lock(azx_dev);
	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->running || azx_dev->locked) {
		/* the stream is in use by PCM or another download */
		spin_unlock_irq(&chip->reg_lock);
		err = -EBUSY;
		goto unlock;
	}
	azx_dev->prepared = 0;
	/* keep a copy so the PCM state can be restored afterwards */
	chip->saved_azx_dev = *azx_dev;
	azx_dev->locked = 1;
	spin_unlock_irq(&chip->reg_lock);

	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
					 byte_size, bufp);
	if (err < 0)
		goto err_alloc;

	azx_dev->bufsize = byte_size;
	azx_dev->period_bytes = byte_size;
	azx_dev->format_val = format;

	azx_stream_reset(chip, azx_dev);

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	/* program the whole buffer as a single BDL entry */
	azx_dev->frags = 0;
	bdl = (u32 *)azx_dev->bdl.area;
	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
	if (err < 0)
		goto error;

	azx_setup_controller(chip, azx_dev);
	dsp_unlock(azx_dev);
	return azx_dev->stream_tag;

 error:
	chip->ops->dma_free_pages(chip, bufp);
 err_alloc:
	spin_lock_irq(&chip->reg_lock);
	/* restore the saved state only if the stream is still opened */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
 unlock:
	dsp_unlock(azx_dev);
	return err;
}
1435
1436 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1437 {
1438 struct azx *chip = bus->private_data;
1439 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1440
1441 if (start)
1442 azx_stream_start(chip, azx_dev);
1443 else
1444 azx_stream_stop(chip, azx_dev);
1445 azx_dev->running = start;
1446 }
1447
/*
 * Release the stream borrowed by azx_load_dsp_prepare() and restore the
 * saved stream state.  No-op unless a download buffer is actually active.
 */
static void azx_load_dsp_cleanup(struct hda_bus *bus,
				 struct snd_dma_buffer *dmab)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	if (!dmab->area || !azx_dev->locked)
		return;

	dsp_lock(azx_dev);
	/* reset BDL address and stop the stream DMA */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;

	chip->ops->dma_free_pages(chip, dmab);
	dmab->area = NULL;

	spin_lock_irq(&chip->reg_lock);
	/* restore the saved state only if the stream is still opened */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
	dsp_unlock(azx_dev);
}
1476 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1477
1478 int azx_alloc_stream_pages(struct azx *chip)
1479 {
1480 int i, err;
1481
1482 for (i = 0; i < chip->num_streams; i++) {
1483 dsp_lock_init(&chip->azx_dev[i]);
1484 /* allocate memory for the BDL for each stream */
1485 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1486 BDL_SIZE,
1487 &chip->azx_dev[i].bdl);
1488 if (err < 0)
1489 return -ENOMEM;
1490 }
1491 /* allocate memory for the position buffer */
1492 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1493 chip->num_streams * 8, &chip->posbuf);
1494 if (err < 0)
1495 return -ENOMEM;
1496
1497 /* allocate CORB/RIRB */
1498 err = azx_alloc_cmd_io(chip);
1499 if (err < 0)
1500 return err;
1501 return 0;
1502 }
1503 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1504
1505 void azx_free_stream_pages(struct azx *chip)
1506 {
1507 int i;
1508 if (chip->azx_dev) {
1509 for (i = 0; i < chip->num_streams; i++)
1510 if (chip->azx_dev[i].bdl.area)
1511 chip->ops->dma_free_pages(
1512 chip, &chip->azx_dev[i].bdl);
1513 }
1514 if (chip->rb.area)
1515 chip->ops->dma_free_pages(chip, &chip->rb);
1516 if (chip->posbuf.area)
1517 chip->ops->dma_free_pages(chip, &chip->posbuf);
1518 }
1519 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1520
1521 /*
1522 * Lowlevel interface
1523 */
1524
/* enter link reset: clear GCTL.CRST and wait (up to 100ms) for the
 * hardware to acknowledge by reading the bit back as 0
 */
void azx_enter_link_reset(struct azx *chip)
{
	unsigned long timeout;

	/* reset controller */
	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);

	timeout = jiffies + msecs_to_jiffies(100);
	while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
	       time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1539
/* exit link reset: set GCTL.CRST and wait (up to 100ms) until the
 * register reads back non-zero, i.e. the controller is alive again
 */
static void azx_exit_link_reset(struct azx *chip)
{
	unsigned long timeout;

	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);

	timeout = jiffies + msecs_to_jiffies(100);
	while (!azx_readb(chip, GCTL) &&
	       time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
1552
/* reset codec link; with full_reset the controller goes through a full
 * enter/exit link reset cycle, otherwise only the ready state is checked.
 * Also latches the codec presence mask from STATESTS after the reset.
 */
static int azx_reset(struct azx *chip, bool full_reset)
{
	if (!full_reset)
		goto __skip;

	/* clear STATESTS */
	azx_writew(chip, STATESTS, STATESTS_INT_MASK);

	/* reset controller */
	azx_enter_link_reset(chip);

	/* delay for >= 100us for codec PLL to settle per spec
	 * Rev 0.9 section 5.5.1
	 */
	usleep_range(500, 1000);

	/* Bring controller out of reset */
	azx_exit_link_reset(chip);

	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
	usleep_range(1000, 1200);

 __skip:
	/* check to see if controller is ready */
	if (!azx_readb(chip, GCTL)) {
		dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
		return -EBUSY;
	}

	/* Accept unsolicited responses */
	if (!chip->single_cmd)
		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
			   AZX_GCTL_UNSOL);

	/* detect codecs: STATESTS has one bit per codec that woke up */
	if (!chip->codec_mask) {
		chip->codec_mask = azx_readw(chip, STATESTS);
		dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
			chip->codec_mask);
	}

	return 0;
}
1597
/* enable interrupts */
static void azx_int_enable(struct azx *chip)
{
	/* enable controller CIE and GIE */
	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
		   AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
}
1605
/* disable interrupts: first per-stream, then the stream-enable bits,
 * finally the controller/global enables
 */
static void azx_int_disable(struct azx *chip)
{
	int i;

	/* disable interrupts in stream descriptor */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_sd_writeb(chip, azx_dev, SD_CTL,
			      azx_sd_readb(chip, azx_dev, SD_CTL) &
			      ~SD_INT_MASK);
	}

	/* disable SIE for all streams */
	azx_writeb(chip, INTCTL, 0);

	/* disable controller CIE and GIE */
	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
		   ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
}
1626
/* clear (acknowledge) all pending interrupt status bits */
static void azx_int_clear(struct azx *chip)
{
	int i;

	/* clear stream status */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
	}

	/* clear STATESTS */
	azx_writew(chip, STATESTS, STATESTS_INT_MASK);

	/* clear rirb status */
	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);

	/* clear int status */
	azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
}
1647
/*
 * reset and start the controller registers; no-op when the chip is
 * already marked initialized (e.g. repeated resume calls)
 */
void azx_init_chip(struct azx *chip, bool full_reset)
{
	if (chip->initialized)
		return;

	/* reset controller */
	azx_reset(chip, full_reset);

	/* initialize interrupts */
	azx_int_clear(chip);
	azx_int_enable(chip);

	/* initialize the codec command I/O */
	if (!chip->single_cmd)
		azx_init_cmd_io(chip);

	/* program the position buffer */
	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));

	chip->initialized = 1;
}
EXPORT_SYMBOL_GPL(azx_init_chip);
1674
/* stop the controller: the counterpart of azx_init_chip(); no-op when
 * the chip was never initialized or is already stopped
 */
void azx_stop_chip(struct azx *chip)
{
	if (!chip->initialized)
		return;

	/* disable interrupts */
	azx_int_disable(chip);
	azx_int_clear(chip);

	/* disable CORB/RIRB */
	azx_free_cmd_io(chip);

	/* disable position buffer */
	azx_writel(chip, DPLBASE, 0);
	azx_writel(chip, DPUBASE, 0);

	chip->initialized = 0;
}
EXPORT_SYMBOL_GPL(azx_stop_chip);
1694
/*
 * interrupt handler: acknowledges stream completions (triggering the
 * PCM period callback) and harvests RIRB responses
 */
irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct azx_dev *azx_dev;
	u32 status;
	u8 sd_status;
	int i;

#ifdef CONFIG_PM
	/* while runtime-suspended the registers are inaccessible,
	 * so the interrupt cannot be ours
	 */
	if (azx_has_pm_runtime(chip))
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;
#endif

	spin_lock(&chip->reg_lock);

	if (chip->disabled) {
		spin_unlock(&chip->reg_lock);
		return IRQ_NONE;
	}

	/* 0xffffffff means the device dropped off the bus (e.g. in D3) */
	status = azx_readl(chip, INTSTS);
	if (status == 0 || status == 0xffffffff) {
		spin_unlock(&chip->reg_lock);
		return IRQ_NONE;
	}

	for (i = 0; i < chip->num_streams; i++) {
		azx_dev = &chip->azx_dev[i];
		if (status & azx_dev->sd_int_sta_mask) {
			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
			/* ack the stream status bits */
			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
			if (!azx_dev->substream || !azx_dev->running ||
			    !(sd_status & SD_INT_COMPLETE))
				continue;
			/* check whether this IRQ is really acceptable */
			if (!chip->ops->position_check ||
			    chip->ops->position_check(chip, azx_dev)) {
				/* drop the lock around the period callback */
				spin_unlock(&chip->reg_lock);
				snd_pcm_period_elapsed(azx_dev->substream);
				spin_lock(&chip->reg_lock);
			}
		}
	}

	/* clear rirb int */
	status = azx_readb(chip, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE) {
			/* some chips need a delay before reading RIRB */
			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
				udelay(80);
			azx_update_rirb(chip);
		}
		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
	}

	spin_unlock(&chip->reg_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(azx_interrupt);
1759
/*
 * Codec interface
 */
1763
/*
 * Probe the given codec address by reading the root node's vendor ID
 * parameter; returns 0 when the codec answers, -EIO otherwise
 */
static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res;

	mutex_lock(&chip->bus->cmd_mutex);
	/* probing flag makes the response-timeout path bail out quickly */
	chip->probing = 1;
	azx_send_cmd(chip->bus, cmd);
	res = azx_get_response(chip->bus, addr);
	chip->probing = 0;
	mutex_unlock(&chip->bus->cmd_mutex);
	if (res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}
1784
/* full bus reset: stop and restart the controller, then let the codec
 * core re-initialize all codecs; in_reset guards against recursion
 */
static void azx_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus->private_data;

	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (chip->initialized)
		snd_hda_bus_reset(chip->bus);
	bus->in_reset = 0;
}
1796
1797 static int get_jackpoll_interval(struct azx *chip)
1798 {
1799 int i;
1800 unsigned int j;
1801
1802 if (!chip->jackpoll_ms)
1803 return 0;
1804
1805 i = chip->jackpoll_ms[chip->dev_index];
1806 if (i == 0)
1807 return 0;
1808 if (i < 50 || i > 60000)
1809 j = 0;
1810 else
1811 j = msecs_to_jiffies(i);
1812 if (j == 0)
1813 dev_warn(chip->card->dev,
1814 "jackpoll_ms value out of range: %d\n", i);
1815 return j;
1816 }
1817
/* callbacks bridging the hda_codec core to this controller driver */
static struct hda_bus_ops bus_ops = {
	.command = azx_send_cmd,
	.get_response = azx_get_response,
	.attach_pcm = azx_attach_pcm_stream,
	.bus_reset = azx_bus_reset,
#ifdef CONFIG_SND_HDA_DSP_LOADER
	.load_dsp_prepare = azx_load_dsp_prepare,
	.load_dsp_trigger = azx_load_dsp_trigger,
	.load_dsp_cleanup = azx_load_dsp_cleanup,
#endif
};
1829
/* HD-audio bus initialization: create the hda_bus object, wire up the
 * controller callbacks and apply chipset-specific workaround flags
 */
int azx_bus_create(struct azx *chip, const char *model)
{
	struct hda_bus *bus;
	int err;

	err = snd_hda_bus_new(chip->card, &bus);
	if (err < 0)
		return err;

	chip->bus = bus;
	bus->private_data = chip;
	bus->pci = chip->pci;
	bus->modelname = model;
	bus->ops = bus_ops;

	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
		dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
		bus->needs_damn_long_delay = 1;
	}

	/* AMD chipsets often cause the communication stalls upon certain
	 * sequence like the pin-detection. It seems that forcing the synced
	 * access works around the stall. Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		bus->sync_write = 1;
		bus->allow_bus_reset = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_create);
1864
/* Probe codecs: first verify each advertised codec slot answers, then
 * create a hda_codec instance for each live slot.  Returns -ENXIO when
 * no codec could be initialized at all.
 */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
	struct hda_bus *bus = chip->bus;
	int c, codecs, err;

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				chip->codec_mask &= ~(1 << c);
				/* More badly, accessing to a non-existing
				 * codec often screws up the controller chip,
				 * and disturbs the further communications.
				 * Thus if an error occurs during probing,
				 * better to reset the controller chip to
				 * get back to the sanity state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(bus, bus->card, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = get_jackpoll_interval(chip);
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);
1917
1918 /* configure each codec instance */
1919 int azx_codec_configure(struct azx *chip)
1920 {
1921 struct hda_codec *codec;
1922 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1923 snd_hda_codec_configure(codec);
1924 }
1925 return 0;
1926 }
1927 EXPORT_SYMBOL_GPL(azx_codec_configure);
1928
1929
1930 static bool is_input_stream(struct azx *chip, unsigned char index)
1931 {
1932 return (index >= chip->capture_index_offset &&
1933 index < chip->capture_index_offset + chip->capture_streams);
1934 }
1935
/* initialize SD streams: set up register base, position-buffer slot,
 * interrupt mask and stream tag for each stream descriptor
 */
int azx_init_stream(struct azx *chip)
{
	int i;
	int in_stream_tag = 0;
	int out_stream_tag = 0;

	/* initialize each stream (aka device)
	 * assign the starting bdl address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
		azx_dev->sd_int_sta_mask = 1 << i;
		azx_dev->index = i;

		/* stream tag must be unique throughout
		 * the stream direction group,
		 * valid values 1...15
		 * use separate stream tag if the flag
		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
		 */
		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
			azx_dev->stream_tag =
				is_input_stream(chip, i) ?
				++in_stream_tag :
				++out_stream_tag;
		else
			azx_dev->stream_tag = i + 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_stream);
1974
/*
 * reboot notifier for hang-up problem at power-down: notifies the codec
 * core and stops the controller before the machine reboots/halts
 */
static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
	struct azx *chip = container_of(nb, struct azx, reboot_notifier);
	snd_hda_bus_reboot_notify(chip->bus);
	azx_stop_chip(chip);
	return NOTIFY_OK;
}
1985
/* register the reboot notifier so the chip is stopped cleanly at shutdown */
void azx_notifier_register(struct azx *chip)
{
	chip->reboot_notifier.notifier_call = azx_halt;
	register_reboot_notifier(&chip->reboot_notifier);
}
EXPORT_SYMBOL_GPL(azx_notifier_register);
1992
/* unregister the reboot notifier; safe to call when it was never set up */
void azx_notifier_unregister(struct azx *chip)
{
	if (chip->reboot_notifier.notifier_call)
		unregister_reboot_notifier(&chip->reboot_notifier);
}
EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1999
2000 MODULE_LICENSE("GPL");
2001 MODULE_DESCRIPTION("Common HDA driver functions");
This page took 0.074726 seconds and 5 git commands to generate.