sound/pci/hda/hda_controller.c (deliverable/linux.git, commit ad85f9bfaf57bb82d2c33ae086673832d03c9be4)
1 /*
2 *
3 * Implementation of the primary ALSA driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_controller.h"
34
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
37
38 /* DSP lock helpers */
39 #ifdef CONFIG_SND_HDA_DSP_LOADER
40 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
41 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
42 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
43 #define dsp_is_locked(dev) ((dev)->locked)
44 #else
45 #define dsp_lock_init(dev) do {} while (0)
46 #define dsp_lock(dev) do {} while (0)
47 #define dsp_unlock(dev) do {} while (0)
48 #define dsp_is_locked(dev) 0
49 #endif
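/*
 * dsp_lock() serializes the DSP loader's exclusive use of a stream
 * against the normal PCM callbacks; dsp_is_locked() lets the PCM side
 * see that a stream is currently claimed by the DSP loader (see
 * azx_load_dsp_prepare() below) and bail out with -EBUSY/-EPIPE.
 */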
50
51 /*
52 * AZX stream operations.
53 */
54
55 /* start a stream */
56 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
57 {
58 /*
59 * Before stream start, initialize parameters
60 */
61 azx_dev->insufficient = 1;
62
63 /* enable SIE */
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
70 }
71
72 /* stop DMA */
73 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
74 {
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
79 }
80
81 /* stop a stream */
82 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
83 {
84 azx_stream_clear(chip, azx_dev);
85 /* disable SIE */
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
88 }
89 EXPORT_SYMBOL_GPL(azx_stream_stop);
90
91 /* reset stream */
92 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
93 {
94 unsigned char val;
95 int timeout;
96
97 azx_stream_clear(chip, azx_dev);
98
99 azx_sd_writeb(chip, azx_dev, SD_CTL,
100 azx_sd_readb(chip, azx_dev, SD_CTL) |
101 SD_CTL_STREAM_RESET);
102 udelay(3);
103 timeout = 300;
104 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
105 SD_CTL_STREAM_RESET) && --timeout)
106 ;
107 val &= ~SD_CTL_STREAM_RESET;
108 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
109 udelay(3);
110
111 timeout = 300;
112 /* waiting for hardware to report that the stream is out of reset */
113 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
114 SD_CTL_STREAM_RESET) && --timeout)
115 ;
116
117 /* reset first position - may not be synced with hw at this time */
118 *azx_dev->posbuf = 0;
119 }
120
121 /*
122 * set up the SD for streaming
123 */
124 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
125 {
126 unsigned int val;
127 /* make sure the run bit is zero for SD */
128 azx_stream_clear(chip, azx_dev);
129 /* program the stream_tag */
130 val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
133 if (!azx_snoop(chip))
134 val |= SD_CTL_TRAFFIC_PRIO;
135 azx_sd_writel(chip, azx_dev, SD_CTL, val);
136
137 /* program the length of samples in cyclic buffer */
138 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
139
140 /* program the stream format */
141 /* this value needs to be the same as the one programmed to the codec */
142 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
143
144 /* program the stream LVI (last valid index) of the BDL */
145 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
146
147 /* program the BDL address */
148 /* lower BDL address */
149 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 /* upper BDL address */
151 azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 upper_32_bits(azx_dev->bdl.addr));
153
154 /* enable the position buffer */
155 if (chip->get_position[0] != azx_get_pos_lpib ||
156 chip->get_position[1] != azx_get_pos_lpib) {
157 if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
158 azx_writel(chip, DPLBASE,
159 (u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
160 }
161
162 /* set the interrupt enable bits in the descriptor control register */
163 azx_sd_writel(chip, azx_dev, SD_CTL,
164 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
165
166 return 0;
167 }
168
169 /* assign a stream for the PCM */
170 static inline struct azx_dev *
171 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
172 {
173 int dev, i, nums;
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
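/* e.g. playback substream 0 of PCM device 1 gives
 * key = (1 << 16) | (0 << 2) | (0 + 1) = 0x00010001;
 * the "+ 1" on the stream keeps the key non-zero even for
 * device 0, substream 0.
 */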
178
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
182 } else {
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
185 }
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
188 dsp_lock(azx_dev);
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
190 if (azx_dev->assigned_key == key) {
191 azx_dev->opened = 1;
192 azx_dev->assigned_key = key;
193 dsp_unlock(azx_dev);
194 return azx_dev;
195 }
196 if (!res ||
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
198 res = azx_dev;
199 }
200 dsp_unlock(azx_dev);
201 }
202 if (res) {
203 dsp_lock(res);
204 res->opened = 1;
205 res->assigned_key = key;
206 dsp_unlock(res);
207 }
208 return res;
209 }
210
211 /* release the assigned stream */
212 static inline void azx_release_device(struct azx_dev *azx_dev)
213 {
214 azx_dev->opened = 0;
215 }
216
217 static cycle_t azx_cc_read(const struct cyclecounter *cc)
218 {
219 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
220 struct snd_pcm_substream *substream = azx_dev->substream;
221 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
222 struct azx *chip = apcm->chip;
223
224 return azx_readl(chip, WALLCLK);
225 }
226
227 static void azx_timecounter_init(struct snd_pcm_substream *substream,
228 bool force, cycle_t last)
229 {
230 struct azx_dev *azx_dev = get_azx_dev(substream);
231 struct timecounter *tc = &azx_dev->azx_tc;
232 struct cyclecounter *cc = &azx_dev->azx_cc;
233 u64 nsec;
234
235 cc->read = azx_cc_read;
236 cc->mask = CLOCKSOURCE_MASK(32);
237
238 /*
239 * Converting from 24 MHz to ns means applying a 125/3 factor.
240 * To avoid any saturation issues in intermediate operations,
241 * the 125 factor is applied first. The division is applied
242 * last after reading the timecounter value.
243 * Applying the 1/3 factor as part of the multiplication
244 * requires at least 20 bits for decent precision; however,
245 * overflows would occur after about 4 hours or less, which is not an option.
246 */
247
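/*
 * In other words, one 24 MHz cycle is 1/24000000 s = 125/3 ns,
 * so nsec = cycles * 125 / 3.  The "* 125" is done here through
 * cc->mult; the "/ 3" is applied when the counter is read back
 * (see the div_u64(nsec, 3) in azx_get_time_info()).
 */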
248 cc->mult = 125; /* saturation after 195 years */
249 cc->shift = 0;
250
251 nsec = 0; /* audio time is elapsed time since trigger */
252 timecounter_init(tc, cc, nsec);
253 if (force)
254 /*
255 * force timecounter to use predefined value,
256 * used for synchronized starts
257 */
258 tc->cycle_last = last;
259 }
260
261 static inline struct hda_pcm_stream *
262 to_hda_pcm_stream(struct snd_pcm_substream *substream)
263 {
264 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
265 return &apcm->info->stream[substream->stream];
266 }
267
268 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
269 u64 nsec)
270 {
271 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
272 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
273 u64 codec_frames, codec_nsecs;
274
275 if (!hinfo->ops.get_delay)
276 return nsec;
277
278 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
279 codec_nsecs = div_u64(codec_frames * 1000000000LL,
280 substream->runtime->rate);
281
282 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
283 return nsec + codec_nsecs;
284
285 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
286 }
287
288 /*
289 * set up a BDL entry
290 */
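/*
 * Each BDL entry below occupies four 32-bit words:
 *   bdl[0] = chunk address, lower 32 bits
 *   bdl[1] = chunk address, upper 32 bits
 *   bdl[2] = chunk length in bytes
 *   bdl[3] = IOC flag in bit 0 (interrupt on completion)
 */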
291 static int setup_bdle(struct azx *chip,
292 struct snd_dma_buffer *dmab,
293 struct azx_dev *azx_dev, u32 **bdlp,
294 int ofs, int size, int with_ioc)
295 {
296 u32 *bdl = *bdlp;
297
298 while (size > 0) {
299 dma_addr_t addr;
300 int chunk;
301
302 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
303 return -EINVAL;
304
305 addr = snd_sgbuf_get_addr(dmab, ofs);
306 /* program the address field of the BDL entry */
307 bdl[0] = cpu_to_le32((u32)addr);
308 bdl[1] = cpu_to_le32(upper_32_bits(addr));
309 /* program the size field of the BDL entry */
310 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
311 /* one BDLE cannot cross 4K boundary on CTHDA chips */
312 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
313 u32 remain = 0x1000 - (ofs & 0xfff);
314 if (chunk > remain)
315 chunk = remain;
316 }
317 bdl[2] = cpu_to_le32(chunk);
318 /* program the IOC to enable interrupt
319 * only when the whole fragment is processed
320 */
321 size -= chunk;
322 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
323 bdl += 4;
324 azx_dev->frags++;
325 ofs += chunk;
326 }
327 *bdlp = bdl;
328 return ofs;
329 }
330
331 /*
332 * set up BDL entries
333 */
334 static int azx_setup_periods(struct azx *chip,
335 struct snd_pcm_substream *substream,
336 struct azx_dev *azx_dev)
337 {
338 u32 *bdl;
339 int i, ofs, periods, period_bytes;
340 int pos_adj = 0;
341
342 /* reset BDL address */
343 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
344 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
345
346 period_bytes = azx_dev->period_bytes;
347 periods = azx_dev->bufsize / period_bytes;
348
349 /* program the initial BDL entries */
350 bdl = (u32 *)azx_dev->bdl.area;
351 ofs = 0;
352 azx_dev->frags = 0;
353
354 if (chip->bdl_pos_adj)
355 pos_adj = chip->bdl_pos_adj[chip->dev_index];
356 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
357 struct snd_pcm_runtime *runtime = substream->runtime;
358 int pos_align = pos_adj;
359 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
360 if (!pos_adj)
361 pos_adj = pos_align;
362 else
363 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
364 pos_align;
365 pos_adj = frames_to_bytes(runtime, pos_adj);
366 if (pos_adj >= period_bytes) {
367 dev_warn(chip->card->dev, "Too big adjustment %d\n",
368 pos_adj);
369 pos_adj = 0;
370 } else {
371 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
372 azx_dev,
373 &bdl, ofs, pos_adj, true);
374 if (ofs < 0)
375 goto error;
376 }
377 } else
378 pos_adj = 0;
379
380 for (i = 0; i < periods; i++) {
381 if (i == periods - 1 && pos_adj)
382 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
383 azx_dev, &bdl, ofs,
384 period_bytes - pos_adj, 0);
385 else
386 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
387 azx_dev, &bdl, ofs,
388 period_bytes,
389 !azx_dev->no_period_wakeup);
390 if (ofs < 0)
391 goto error;
392 }
393 return 0;
394
395 error:
396 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
397 azx_dev->bufsize, period_bytes);
398 return -EINVAL;
399 }
400
401 /*
402 * PCM ops
403 */
404
405 static int azx_pcm_close(struct snd_pcm_substream *substream)
406 {
407 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
408 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
409 struct azx *chip = apcm->chip;
410 struct azx_dev *azx_dev = get_azx_dev(substream);
411 unsigned long flags;
412
413 mutex_lock(&chip->open_mutex);
414 spin_lock_irqsave(&chip->reg_lock, flags);
415 azx_dev->substream = NULL;
416 azx_dev->running = 0;
417 spin_unlock_irqrestore(&chip->reg_lock, flags);
418 azx_release_device(azx_dev);
419 if (hinfo->ops.close)
420 hinfo->ops.close(hinfo, apcm->codec, substream);
421 snd_hda_power_down(apcm->codec);
422 mutex_unlock(&chip->open_mutex);
423 return 0;
424 }
425
426 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
427 struct snd_pcm_hw_params *hw_params)
428 {
429 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
430 struct azx *chip = apcm->chip;
431 int ret;
432
433 dsp_lock(get_azx_dev(substream));
434 if (dsp_is_locked(get_azx_dev(substream))) {
435 ret = -EBUSY;
436 goto unlock;
437 }
438
439 ret = chip->ops->substream_alloc_pages(chip, substream,
440 params_buffer_bytes(hw_params));
441 unlock:
442 dsp_unlock(get_azx_dev(substream));
443 return ret;
444 }
445
446 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
447 {
448 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
449 struct azx_dev *azx_dev = get_azx_dev(substream);
450 struct azx *chip = apcm->chip;
451 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
452 int err;
453
454 /* reset BDL address */
455 dsp_lock(azx_dev);
456 if (!dsp_is_locked(azx_dev)) {
457 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
458 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
459 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
460 azx_dev->bufsize = 0;
461 azx_dev->period_bytes = 0;
462 azx_dev->format_val = 0;
463 }
464
465 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
466
467 err = chip->ops->substream_free_pages(chip, substream);
468 azx_dev->prepared = 0;
469 dsp_unlock(azx_dev);
470 return err;
471 }
472
473 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
474 {
475 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
476 struct azx *chip = apcm->chip;
477 struct azx_dev *azx_dev = get_azx_dev(substream);
478 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
479 struct snd_pcm_runtime *runtime = substream->runtime;
480 unsigned int bufsize, period_bytes, format_val, stream_tag;
481 int err;
482 struct hda_spdif_out *spdif =
483 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
484 unsigned short ctls = spdif ? spdif->ctls : 0;
485
486 dsp_lock(azx_dev);
487 if (dsp_is_locked(azx_dev)) {
488 err = -EBUSY;
489 goto unlock;
490 }
491
492 azx_stream_reset(chip, azx_dev);
493 format_val = snd_hda_calc_stream_format(apcm->codec,
494 runtime->rate,
495 runtime->channels,
496 runtime->format,
497 hinfo->maxbps,
498 ctls);
499 if (!format_val) {
500 dev_err(chip->card->dev,
501 "invalid format_val, rate=%d, ch=%d, format=%d\n",
502 runtime->rate, runtime->channels, runtime->format);
503 err = -EINVAL;
504 goto unlock;
505 }
506
507 bufsize = snd_pcm_lib_buffer_bytes(substream);
508 period_bytes = snd_pcm_lib_period_bytes(substream);
509
510 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
511 bufsize, format_val);
512
513 if (bufsize != azx_dev->bufsize ||
514 period_bytes != azx_dev->period_bytes ||
515 format_val != azx_dev->format_val ||
516 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
517 azx_dev->bufsize = bufsize;
518 azx_dev->period_bytes = period_bytes;
519 azx_dev->format_val = format_val;
520 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
521 err = azx_setup_periods(chip, substream, azx_dev);
522 if (err < 0)
523 goto unlock;
524 }
525
526 /* when LPIB delay correction gives a small negative value,
527 * we ignore it; currently set the threshold statically to
528 * 64 frames
529 */
530 if (runtime->period_size > 64)
531 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
532 else
533 azx_dev->delay_negative_threshold = 0;
534
535 /* wallclk has 24Mhz clock source */
536 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
537 runtime->rate) * 1000);
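/* e.g. a 1024-frame period at 48 kHz gives
 * (1024 * 24000 / 48000) * 1000 = 512000 wallclock ticks,
 * i.e. about 21.3 ms at 24 MHz
 */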
538 azx_setup_controller(chip, azx_dev);
539 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
540 azx_dev->fifo_size =
541 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
542 else
543 azx_dev->fifo_size = 0;
544
545 stream_tag = azx_dev->stream_tag;
546 /* CA-IBG chips need the playback stream starting from 1 */
547 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
548 stream_tag > chip->capture_streams)
549 stream_tag -= chip->capture_streams;
550 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
551 azx_dev->format_val, substream);
552
553 unlock:
554 if (!err)
555 azx_dev->prepared = 1;
556 dsp_unlock(azx_dev);
557 return err;
558 }
559
560 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
561 {
562 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
563 struct azx *chip = apcm->chip;
564 struct azx_dev *azx_dev;
565 struct snd_pcm_substream *s;
566 int rstart = 0, start, nsync = 0, sbits = 0;
567 int nwait, timeout;
568
569 azx_dev = get_azx_dev(substream);
570 trace_azx_pcm_trigger(chip, azx_dev, cmd);
571
572 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
573 return -EPIPE;
574
575 switch (cmd) {
576 case SNDRV_PCM_TRIGGER_START:
577 rstart = 1;
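/* fall through */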
578 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
579 case SNDRV_PCM_TRIGGER_RESUME:
580 start = 1;
581 break;
582 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
583 case SNDRV_PCM_TRIGGER_SUSPEND:
584 case SNDRV_PCM_TRIGGER_STOP:
585 start = 0;
586 break;
587 default:
588 return -EINVAL;
589 }
590
591 snd_pcm_group_for_each_entry(s, substream) {
592 if (s->pcm->card != substream->pcm->card)
593 continue;
594 azx_dev = get_azx_dev(s);
595 sbits |= 1 << azx_dev->index;
596 nsync++;
597 snd_pcm_trigger_done(s, substream);
598 }
599
600 spin_lock(&chip->reg_lock);
601
602 /* first, set SYNC bits of corresponding streams */
603 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
604 azx_writel(chip, OLD_SSYNC,
605 azx_readl(chip, OLD_SSYNC) | sbits);
606 else
607 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
608
609 snd_pcm_group_for_each_entry(s, substream) {
610 if (s->pcm->card != substream->pcm->card)
611 continue;
612 azx_dev = get_azx_dev(s);
613 if (start) {
614 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
615 if (!rstart)
616 azx_dev->start_wallclk -=
617 azx_dev->period_wallclk;
618 azx_stream_start(chip, azx_dev);
619 } else {
620 azx_stream_stop(chip, azx_dev);
621 }
622 azx_dev->running = start;
623 }
624 spin_unlock(&chip->reg_lock);
625 if (start) {
626 /* wait until all FIFOs get ready */
627 for (timeout = 5000; timeout; timeout--) {
628 nwait = 0;
629 snd_pcm_group_for_each_entry(s, substream) {
630 if (s->pcm->card != substream->pcm->card)
631 continue;
632 azx_dev = get_azx_dev(s);
633 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
634 SD_STS_FIFO_READY))
635 nwait++;
636 }
637 if (!nwait)
638 break;
639 cpu_relax();
640 }
641 } else {
642 /* wait until all RUN bits are cleared */
643 for (timeout = 5000; timeout; timeout--) {
644 nwait = 0;
645 snd_pcm_group_for_each_entry(s, substream) {
646 if (s->pcm->card != substream->pcm->card)
647 continue;
648 azx_dev = get_azx_dev(s);
649 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
650 SD_CTL_DMA_START)
651 nwait++;
652 }
653 if (!nwait)
654 break;
655 cpu_relax();
656 }
657 }
658 spin_lock(&chip->reg_lock);
659 /* reset SYNC bits */
660 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
661 azx_writel(chip, OLD_SSYNC,
662 azx_readl(chip, OLD_SSYNC) & ~sbits);
663 else
664 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
665 if (start) {
666 azx_timecounter_init(substream, 0, 0);
667 snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
668 substream->runtime->trigger_tstamp_latched = true;
669
670 if (nsync > 1) {
671 cycle_t cycle_last;
672
673 /* same start cycle for master and group */
674 azx_dev = get_azx_dev(substream);
675 cycle_last = azx_dev->azx_tc.cycle_last;
676
677 snd_pcm_group_for_each_entry(s, substream) {
678 if (s->pcm->card != substream->pcm->card)
679 continue;
680 azx_timecounter_init(s, 1, cycle_last);
681 }
682 }
683 }
684 spin_unlock(&chip->reg_lock);
685 return 0;
686 }
687
688 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
689 {
690 return azx_sd_readl(chip, azx_dev, SD_LPIB);
691 }
692 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
693
694 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
695 {
696 return le32_to_cpu(*azx_dev->posbuf);
697 }
698 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
699
700 unsigned int azx_get_position(struct azx *chip,
701 struct azx_dev *azx_dev)
702 {
703 struct snd_pcm_substream *substream = azx_dev->substream;
704 unsigned int pos;
705 int stream = substream->stream;
706 int delay = 0;
707
708 if (chip->get_position[stream])
709 pos = chip->get_position[stream](chip, azx_dev);
710 else /* use the position buffer as default */
711 pos = azx_get_pos_posbuf(chip, azx_dev);
712
713 if (pos >= azx_dev->bufsize)
714 pos = 0;
715
716 if (substream->runtime) {
717 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
718 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
719
720 if (chip->get_delay[stream])
721 delay += chip->get_delay[stream](chip, azx_dev, pos);
722 if (hinfo->ops.get_delay)
723 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
724 substream);
725 substream->runtime->delay = delay;
726 }
727
728 trace_azx_get_position(chip, azx_dev, pos, delay);
729 return pos;
730 }
731 EXPORT_SYMBOL_GPL(azx_get_position);
732
733 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
734 {
735 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
736 struct azx *chip = apcm->chip;
737 struct azx_dev *azx_dev = get_azx_dev(substream);
738 return bytes_to_frames(substream->runtime,
739 azx_get_position(chip, azx_dev));
740 }
741
742 static int azx_get_time_info(struct snd_pcm_substream *substream,
743 struct timespec *system_ts, struct timespec *audio_ts,
744 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
745 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
746 {
747 struct azx_dev *azx_dev = get_azx_dev(substream);
748 u64 nsec;
749
750 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
751 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
752
753 snd_pcm_gettime(substream->runtime, system_ts);
754
755 nsec = timecounter_read(&azx_dev->azx_tc);
756 nsec = div_u64(nsec, 3); /* can be optimized */
757 if (audio_tstamp_config->report_delay)
758 nsec = azx_adjust_codec_delay(substream, nsec);
759
760 *audio_ts = ns_to_timespec(nsec);
761
762 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
763 audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
764 audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
765
766 } else
767 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
768
769 return 0;
770 }
771
772 static struct snd_pcm_hardware azx_pcm_hw = {
773 .info = (SNDRV_PCM_INFO_MMAP |
774 SNDRV_PCM_INFO_INTERLEAVED |
775 SNDRV_PCM_INFO_BLOCK_TRANSFER |
776 SNDRV_PCM_INFO_MMAP_VALID |
777 /* No full-resume yet implemented */
778 /* SNDRV_PCM_INFO_RESUME |*/
779 SNDRV_PCM_INFO_PAUSE |
780 SNDRV_PCM_INFO_SYNC_START |
781 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
782 SNDRV_PCM_INFO_HAS_LINK_ATIME |
783 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
784 .formats = SNDRV_PCM_FMTBIT_S16_LE,
785 .rates = SNDRV_PCM_RATE_48000,
786 .rate_min = 48000,
787 .rate_max = 48000,
788 .channels_min = 2,
789 .channels_max = 2,
790 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
791 .period_bytes_min = 128,
792 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
793 .periods_min = 2,
794 .periods_max = AZX_MAX_FRAG,
795 .fifo_size = 0,
796 };
797
798 static int azx_pcm_open(struct snd_pcm_substream *substream)
799 {
800 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
801 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
802 struct azx *chip = apcm->chip;
803 struct azx_dev *azx_dev;
804 struct snd_pcm_runtime *runtime = substream->runtime;
805 unsigned long flags;
806 int err;
807 int buff_step;
808
809 mutex_lock(&chip->open_mutex);
810 azx_dev = azx_assign_device(chip, substream);
811 if (azx_dev == NULL) {
812 err = -EBUSY;
813 goto unlock;
814 }
815 runtime->hw = azx_pcm_hw;
816 runtime->hw.channels_min = hinfo->channels_min;
817 runtime->hw.channels_max = hinfo->channels_max;
818 runtime->hw.formats = hinfo->formats;
819 runtime->hw.rates = hinfo->rates;
820 snd_pcm_limit_hw_rates(runtime);
821 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
822
823 /* avoid wrap-around with wall-clock */
824 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
825 20,
826 178000000);
827
828 if (chip->align_buffer_size)
829 /* constrain buffer sizes to be a multiple of 128
830 bytes. This is more efficient in terms of memory
831 access but isn't required by the HDA spec and
832 prevents users from specifying exact period/buffer
833 sizes. For example, at 44.1kHz a period size set
834 to 20ms will be rounded to 19.59ms. */
835 buff_step = 128;
836 else
837 /* Don't enforce steps on buffer sizes; they still need to
838 be a multiple of 4 bytes (HDA spec). Tested on Intel
839 HDA controllers; may not work on all devices where
840 this option needs to be disabled */
841 buff_step = 4;
842
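/*
 * Worked example for the 128-byte case above, assuming 2-channel
 * S16 data (4 bytes per frame) at 44.1 kHz: a 20 ms period is
 * 882 frames = 3528 bytes, which gets rounded to 3456 bytes =
 * 864 frames, i.e. roughly 19.59 ms.
 */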
843 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
844 buff_step);
845 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
846 buff_step);
847 snd_hda_power_up(apcm->codec);
848 if (hinfo->ops.open)
849 err = hinfo->ops.open(hinfo, apcm->codec, substream);
850 else
851 err = -ENODEV;
852 if (err < 0) {
853 azx_release_device(azx_dev);
854 goto powerdown;
855 }
856 snd_pcm_limit_hw_rates(runtime);
857 /* sanity check */
858 if (snd_BUG_ON(!runtime->hw.channels_min) ||
859 snd_BUG_ON(!runtime->hw.channels_max) ||
860 snd_BUG_ON(!runtime->hw.formats) ||
861 snd_BUG_ON(!runtime->hw.rates)) {
862 azx_release_device(azx_dev);
863 if (hinfo->ops.close)
864 hinfo->ops.close(hinfo, apcm->codec, substream);
865 err = -EINVAL;
866 goto powerdown;
867 }
868
869 /* disable LINK_ATIME timestamps for capture streams
870 until we figure out how to handle digital inputs */
871 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
872 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
873 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
874 }
875
876 spin_lock_irqsave(&chip->reg_lock, flags);
877 azx_dev->substream = substream;
878 azx_dev->running = 0;
879 spin_unlock_irqrestore(&chip->reg_lock, flags);
880
881 runtime->private_data = azx_dev;
882 snd_pcm_set_sync(substream);
883 mutex_unlock(&chip->open_mutex);
884 return 0;
885
886 powerdown:
887 snd_hda_power_down(apcm->codec);
888 unlock:
889 mutex_unlock(&chip->open_mutex);
890 return err;
891 }
892
893 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
894 struct vm_area_struct *area)
895 {
896 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
897 struct azx *chip = apcm->chip;
898 if (chip->ops->pcm_mmap_prepare)
899 chip->ops->pcm_mmap_prepare(substream, area);
900 return snd_pcm_lib_default_mmap(substream, area);
901 }
902
903 static struct snd_pcm_ops azx_pcm_ops = {
904 .open = azx_pcm_open,
905 .close = azx_pcm_close,
906 .ioctl = snd_pcm_lib_ioctl,
907 .hw_params = azx_pcm_hw_params,
908 .hw_free = azx_pcm_hw_free,
909 .prepare = azx_pcm_prepare,
910 .trigger = azx_pcm_trigger,
911 .pointer = azx_pcm_pointer,
912 .get_time_info = azx_get_time_info,
913 .mmap = azx_pcm_mmap,
914 .page = snd_pcm_sgbuf_ops_page,
915 };
916
917 static void azx_pcm_free(struct snd_pcm *pcm)
918 {
919 struct azx_pcm *apcm = pcm->private_data;
920 if (apcm) {
921 list_del(&apcm->list);
922 apcm->info->pcm = NULL;
923 kfree(apcm);
924 }
925 }
926
927 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
928
929 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
930 struct hda_pcm *cpcm)
931 {
932 struct azx *chip = bus->private_data;
933 struct snd_pcm *pcm;
934 struct azx_pcm *apcm;
935 int pcm_dev = cpcm->device;
936 unsigned int size;
937 int s, err;
938
939 list_for_each_entry(apcm, &chip->pcm_list, list) {
940 if (apcm->pcm->device == pcm_dev) {
941 dev_err(chip->card->dev, "PCM %d already exists\n",
942 pcm_dev);
943 return -EBUSY;
944 }
945 }
946 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
947 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
948 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
949 &pcm);
950 if (err < 0)
951 return err;
952 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
953 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
954 if (apcm == NULL)
955 return -ENOMEM;
956 apcm->chip = chip;
957 apcm->pcm = pcm;
958 apcm->codec = codec;
959 apcm->info = cpcm;
960 pcm->private_data = apcm;
961 pcm->private_free = azx_pcm_free;
962 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
963 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
964 list_add_tail(&apcm->list, &chip->pcm_list);
965 cpcm->pcm = pcm;
966 for (s = 0; s < 2; s++) {
967 if (cpcm->stream[s].substreams)
968 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
969 }
970 /* buffer pre-allocation */
971 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
972 if (size > MAX_PREALLOC_SIZE)
973 size = MAX_PREALLOC_SIZE;
974 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
975 chip->card->dev,
976 size, MAX_PREALLOC_SIZE);
977 return 0;
978 }
979
980 /*
981 * CORB / RIRB interface
982 */
983 static int azx_alloc_cmd_io(struct azx *chip)
984 {
985 /* a single page (at least 4096 bytes) must suffice for both ring buffers */
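/*
 * CORB: 256 entries * 4 bytes = 1024 bytes at offset 0,
 * RIRB: 256 entries * 8 bytes = 2048 bytes at offset 2048
 * (see azx_init_cmd_io()), so one 4096-byte page covers both.
 */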
986 return chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
987 PAGE_SIZE, &chip->rb);
988 }
989 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
990
991 static void azx_init_cmd_io(struct azx *chip)
992 {
993 int timeout;
994
995 spin_lock_irq(&chip->reg_lock);
996 /* CORB set up */
997 chip->corb.addr = chip->rb.addr;
998 chip->corb.buf = (u32 *)chip->rb.area;
999 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
1000 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
1001
1002 /* set the CORB size to 256 entries (ULI requires this explicitly) */
1003 azx_writeb(chip, CORBSIZE, 0x02);
1004 /* set the corb write pointer to 0 */
1005 azx_writew(chip, CORBWP, 0);
1006
1007 /* reset the corb hw read pointer */
1008 azx_writew(chip, CORBRP, AZX_CORBRP_RST);
1009 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
1010 for (timeout = 1000; timeout > 0; timeout--) {
1011 if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
1012 break;
1013 udelay(1);
1014 }
1015 if (timeout <= 0)
1016 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1017 azx_readw(chip, CORBRP));
1018
1019 azx_writew(chip, CORBRP, 0);
1020 for (timeout = 1000; timeout > 0; timeout--) {
1021 if (azx_readw(chip, CORBRP) == 0)
1022 break;
1023 udelay(1);
1024 }
1025 if (timeout <= 0)
1026 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1027 azx_readw(chip, CORBRP));
1028 }
1029
1030 /* enable corb dma */
1031 azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
1032
1033 /* RIRB set up */
1034 chip->rirb.addr = chip->rb.addr + 2048;
1035 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1036 chip->rirb.wp = chip->rirb.rp = 0;
1037 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1038 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1039 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1040
1041 /* set the RIRB size to 256 entries (ULI requires this explicitly) */
1042 azx_writeb(chip, RIRBSIZE, 0x02);
1043 /* reset the rirb hw write pointer */
1044 azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
1045 /* set N=1, get RIRB response interrupt for new entry */
1046 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1047 azx_writew(chip, RINTCNT, 0xc0);
1048 else
1049 azx_writew(chip, RINTCNT, 1);
1050 /* enable rirb dma and response irq */
1051 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1052 spin_unlock_irq(&chip->reg_lock);
1053 }
1054 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1055
1056 static void azx_free_cmd_io(struct azx *chip)
1057 {
1058 spin_lock_irq(&chip->reg_lock);
1059 /* disable ringbuffer DMAs */
1060 azx_writeb(chip, RIRBCTL, 0);
1061 azx_writeb(chip, CORBCTL, 0);
1062 spin_unlock_irq(&chip->reg_lock);
1063 }
1064 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1065
1066 static unsigned int azx_command_addr(u32 cmd)
1067 {
1068 unsigned int addr = cmd >> 28;
1069
1070 if (addr >= AZX_MAX_CODECS) {
1071 snd_BUG();
1072 addr = 0;
1073 }
1074
1075 return addr;
1076 }
1077
1078 /* send a command */
1079 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1080 {
1081 struct azx *chip = bus->private_data;
1082 unsigned int addr = azx_command_addr(val);
1083 unsigned int wp, rp;
1084
1085 spin_lock_irq(&chip->reg_lock);
1086
1087 /* add command to corb */
1088 wp = azx_readw(chip, CORBWP);
1089 if (wp == 0xffff) {
1090 /* something wrong, controller likely turned to D3 */
1091 spin_unlock_irq(&chip->reg_lock);
1092 return -EIO;
1093 }
1094 wp++;
1095 wp %= AZX_MAX_CORB_ENTRIES;
1096
1097 rp = azx_readw(chip, CORBRP);
1098 if (wp == rp) {
1099 /* oops, it's full */
1100 spin_unlock_irq(&chip->reg_lock);
1101 return -EAGAIN;
1102 }
1103
1104 chip->rirb.cmds[addr]++;
1105 chip->corb.buf[wp] = cpu_to_le32(val);
1106 azx_writew(chip, CORBWP, wp);
1107
1108 spin_unlock_irq(&chip->reg_lock);
1109
1110 return 0;
1111 }
1112
1113 #define AZX_RIRB_EX_UNSOL_EV (1<<4)
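/* In the RIRB extended word, bits [3:0] carry the responding codec
 * address and bit 4 marks an unsolicited response; see the addr and
 * AZX_RIRB_EX_UNSOL_EV handling in azx_update_rirb() below.
 */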
1114
1115 /* retrieve RIRB entry - called from interrupt handler */
1116 static void azx_update_rirb(struct azx *chip)
1117 {
1118 unsigned int rp, wp;
1119 unsigned int addr;
1120 u32 res, res_ex;
1121
1122 wp = azx_readw(chip, RIRBWP);
1123 if (wp == 0xffff) {
1124 /* something wrong, controller likely turned to D3 */
1125 return;
1126 }
1127
1128 if (wp == chip->rirb.wp)
1129 return;
1130 chip->rirb.wp = wp;
1131
1132 while (chip->rirb.rp != wp) {
1133 chip->rirb.rp++;
1134 chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
1135
1136 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1137 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1138 res = le32_to_cpu(chip->rirb.buf[rp]);
1139 addr = res_ex & 0xf;
1140 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1141 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d\n",
1142 res, res_ex,
1143 chip->rirb.rp, wp);
1144 snd_BUG();
1145 } else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
1146 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1147 else if (chip->rirb.cmds[addr]) {
1148 chip->rirb.res[addr] = res;
1149 smp_wmb();
1150 chip->rirb.cmds[addr]--;
1151 } else if (printk_ratelimit()) {
1152 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1153 res, res_ex,
1154 chip->last_cmd[addr]);
1155 }
1156 }
1157 }
1158
1159 /* receive a response */
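/* On a timeout the code below escalates step by step: a one-shot
 * poll of the RIRB, then permanent polling mode, then disabling MSI,
 * and finally either a bus reset attempt or a fallback to single_cmd
 * mode.
 */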
1160 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1161 unsigned int addr)
1162 {
1163 struct azx *chip = bus->private_data;
1164 unsigned long timeout;
1165 unsigned long loopcounter;
1166 int do_poll = 0;
1167
1168 again:
1169 timeout = jiffies + msecs_to_jiffies(1000);
1170
1171 for (loopcounter = 0;; loopcounter++) {
1172 if (chip->polling_mode || do_poll) {
1173 spin_lock_irq(&chip->reg_lock);
1174 azx_update_rirb(chip);
1175 spin_unlock_irq(&chip->reg_lock);
1176 }
1177 if (!chip->rirb.cmds[addr]) {
1178 smp_rmb();
1179 bus->rirb_error = 0;
1180
1181 if (!do_poll)
1182 chip->poll_count = 0;
1183 return chip->rirb.res[addr]; /* the last value */
1184 }
1185 if (time_after(jiffies, timeout))
1186 break;
1187 if (bus->needs_damn_long_delay || loopcounter > 3000)
1188 msleep(2); /* temporary workaround */
1189 else {
1190 udelay(10);
1191 cond_resched();
1192 }
1193 }
1194
1195 if (!bus->no_response_fallback)
1196 return -1;
1197
1198 if (!chip->polling_mode && chip->poll_count < 2) {
1199 dev_dbg(chip->card->dev,
1200 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1201 chip->last_cmd[addr]);
1202 do_poll = 1;
1203 chip->poll_count++;
1204 goto again;
1205 }
1206
1207
1208 if (!chip->polling_mode) {
1209 dev_warn(chip->card->dev,
1210 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1211 chip->last_cmd[addr]);
1212 chip->polling_mode = 1;
1213 goto again;
1214 }
1215
1216 if (chip->msi) {
1217 dev_warn(chip->card->dev,
1218 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1219 chip->last_cmd[addr]);
1220 if (chip->ops->disable_msi_reset_irq &&
1221 chip->ops->disable_msi_reset_irq(chip) < 0) {
1222 bus->rirb_error = 1;
1223 return -1;
1224 }
1225 goto again;
1226 }
1227
1228 if (chip->probing) {
1229 /* If this critical timeout happens during the codec probing
1230 * phase, this is likely an access to a non-existing codec
1231 * slot. Better to return an error and reset the system.
1232 */
1233 return -1;
1234 }
1235
1236 /* a fatal communication error; need either to reset or to fallback
1237 * to the single_cmd mode
1238 */
1239 bus->rirb_error = 1;
1240 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1241 bus->response_reset = 1;
1242 return -1; /* give a chance to retry */
1243 }
1244
1245 dev_err(chip->card->dev,
1246 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1247 chip->last_cmd[addr]);
1248 chip->single_cmd = 1;
1249 bus->response_reset = 0;
1250 /* release CORB/RIRB */
1251 azx_free_cmd_io(chip);
1252 /* disable unsolicited responses */
1253 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1254 return -1;
1255 }
1256
1257 /*
1258 * Use the single immediate command instead of CORB/RIRB for simplicity
1259 *
1260 * Note: according to Intel, this is not the preferred use. The command was
1261 * intended for the BIOS only, and may get confused with unsolicited
1262 * responses. So we shouldn't use it for normal operation from the
1263 * driver.
1264 * The code is left here, however, for debugging/testing purposes.
1265 */
1266
1267 /* receive a response */
1268 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1269 {
1270 int timeout = 50;
1271
1272 while (timeout--) {
1273 /* check IRV busy bit */
1274 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1275 /* reuse rirb.res as the response return value */
1276 chip->rirb.res[addr] = azx_readl(chip, IR);
1277 return 0;
1278 }
1279 udelay(1);
1280 }
1281 if (printk_ratelimit())
1282 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1283 azx_readw(chip, IRS));
1284 chip->rirb.res[addr] = -1;
1285 return -EIO;
1286 }
1287
1288 /* send a command */
1289 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1290 {
1291 struct azx *chip = bus->private_data;
1292 unsigned int addr = azx_command_addr(val);
1293 int timeout = 50;
1294
1295 bus->rirb_error = 0;
1296 while (timeout--) {
1297 /* check ICB busy bit */
1298 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1299 /* Clear IRV valid bit */
1300 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1301 AZX_IRS_VALID);
1302 azx_writel(chip, IC, val);
1303 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1304 AZX_IRS_BUSY);
1305 return azx_single_wait_for_response(chip, addr);
1306 }
1307 udelay(1);
1308 }
1309 if (printk_ratelimit())
1310 dev_dbg(chip->card->dev,
1311 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1312 azx_readw(chip, IRS), val);
1313 return -EIO;
1314 }
1315
1316 /* receive a response */
1317 static unsigned int azx_single_get_response(struct hda_bus *bus,
1318 unsigned int addr)
1319 {
1320 struct azx *chip = bus->private_data;
1321 return chip->rirb.res[addr];
1322 }
1323
1324 /*
1325 * The below are the main callbacks from hda_codec.
1326 *
1327 * They are just the skeleton to call sub-callbacks according to the
1328 * current setting of chip->single_cmd.
1329 */
1330
1331 /* send a command */
1332 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1333 {
1334 struct azx *chip = bus->private_data;
1335
1336 if (chip->disabled)
1337 return 0;
1338 chip->last_cmd[azx_command_addr(val)] = val;
1339 if (chip->single_cmd)
1340 return azx_single_send_cmd(bus, val);
1341 else
1342 return azx_corb_send_cmd(bus, val);
1343 }
1344 EXPORT_SYMBOL_GPL(azx_send_cmd);
1345
1346 /* get a response */
1347 static unsigned int azx_get_response(struct hda_bus *bus,
1348 unsigned int addr)
1349 {
1350 struct azx *chip = bus->private_data;
1351 if (chip->disabled)
1352 return 0;
1353 if (chip->single_cmd)
1354 return azx_single_get_response(bus, addr);
1355 else
1356 return azx_rirb_get_response(bus, addr);
1357 }
1358 EXPORT_SYMBOL_GPL(azx_get_response);
1359
1360 #ifdef CONFIG_SND_HDA_DSP_LOADER
1361 /*
1362 * DSP loading code (e.g. for CA0132)
1363 */
1364
1365 /* use the first stream for loading DSP */
1366 static struct azx_dev *
1367 azx_get_dsp_loader_dev(struct azx *chip)
1368 {
1369 return &chip->azx_dev[chip->playback_index_offset];
1370 }
1371
1372 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1373 unsigned int byte_size,
1374 struct snd_dma_buffer *bufp)
1375 {
1376 u32 *bdl;
1377 struct azx *chip = bus->private_data;
1378 struct azx_dev *azx_dev;
1379 int err;
1380
1381 azx_dev = azx_get_dsp_loader_dev(chip);
1382
1383 dsp_lock(azx_dev);
1384 spin_lock_irq(&chip->reg_lock);
1385 if (azx_dev->running || azx_dev->locked) {
1386 spin_unlock_irq(&chip->reg_lock);
1387 err = -EBUSY;
1388 goto unlock;
1389 }
1390 azx_dev->prepared = 0;
1391 chip->saved_azx_dev = *azx_dev;
1392 azx_dev->locked = 1;
1393 spin_unlock_irq(&chip->reg_lock);
1394
1395 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1396 byte_size, bufp);
1397 if (err < 0)
1398 goto err_alloc;
1399
1400 azx_dev->bufsize = byte_size;
1401 azx_dev->period_bytes = byte_size;
1402 azx_dev->format_val = format;
1403
1404 azx_stream_reset(chip, azx_dev);
1405
1406 /* reset BDL address */
1407 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1408 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1409
1410 azx_dev->frags = 0;
1411 bdl = (u32 *)azx_dev->bdl.area;
1412 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1413 if (err < 0)
1414 goto error;
1415
1416 azx_setup_controller(chip, azx_dev);
1417 dsp_unlock(azx_dev);
1418 return azx_dev->stream_tag;
1419
1420 error:
1421 chip->ops->dma_free_pages(chip, bufp);
1422 err_alloc:
1423 spin_lock_irq(&chip->reg_lock);
1424 if (azx_dev->opened)
1425 *azx_dev = chip->saved_azx_dev;
1426 azx_dev->locked = 0;
1427 spin_unlock_irq(&chip->reg_lock);
1428 unlock:
1429 dsp_unlock(azx_dev);
1430 return err;
1431 }
1432
1433 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1434 {
1435 struct azx *chip = bus->private_data;
1436 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1437
1438 if (start)
1439 azx_stream_start(chip, azx_dev);
1440 else
1441 azx_stream_stop(chip, azx_dev);
1442 azx_dev->running = start;
1443 }
1444
1445 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1446 struct snd_dma_buffer *dmab)
1447 {
1448 struct azx *chip = bus->private_data;
1449 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1450
1451 if (!dmab->area || !azx_dev->locked)
1452 return;
1453
1454 dsp_lock(azx_dev);
1455 /* reset BDL address */
1456 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1457 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1458 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1459 azx_dev->bufsize = 0;
1460 azx_dev->period_bytes = 0;
1461 azx_dev->format_val = 0;
1462
1463 chip->ops->dma_free_pages(chip, dmab);
1464 dmab->area = NULL;
1465
1466 spin_lock_irq(&chip->reg_lock);
1467 if (azx_dev->opened)
1468 *azx_dev = chip->saved_azx_dev;
1469 azx_dev->locked = 0;
1470 spin_unlock_irq(&chip->reg_lock);
1471 dsp_unlock(azx_dev);
1472 }
1473 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1474
1475 int azx_alloc_stream_pages(struct azx *chip)
1476 {
1477 int i, err;
1478
1479 for (i = 0; i < chip->num_streams; i++) {
1480 dsp_lock_init(&chip->azx_dev[i]);
1481 /* allocate memory for the BDL for each stream */
1482 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1483 BDL_SIZE,
1484 &chip->azx_dev[i].bdl);
1485 if (err < 0)
1486 return -ENOMEM;
1487 }
1488 /* allocate memory for the position buffer */
1489 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1490 chip->num_streams * 8, &chip->posbuf);
1491 if (err < 0)
1492 return -ENOMEM;
1493
1494 /* allocate CORB/RIRB */
1495 err = azx_alloc_cmd_io(chip);
1496 if (err < 0)
1497 return err;
1498 return 0;
1499 }
1500 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1501
1502 void azx_free_stream_pages(struct azx *chip)
1503 {
1504 int i;
1505 if (chip->azx_dev) {
1506 for (i = 0; i < chip->num_streams; i++)
1507 if (chip->azx_dev[i].bdl.area)
1508 chip->ops->dma_free_pages(
1509 chip, &chip->azx_dev[i].bdl);
1510 }
1511 if (chip->rb.area)
1512 chip->ops->dma_free_pages(chip, &chip->rb);
1513 if (chip->posbuf.area)
1514 chip->ops->dma_free_pages(chip, &chip->posbuf);
1515 }
1516 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1517
1518 /*
1519 * Lowlevel interface
1520 */
1521
1522 /* enter link reset */
1523 void azx_enter_link_reset(struct azx *chip)
1524 {
1525 unsigned long timeout;
1526
1527 /* reset controller */
1528 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
1529
1530 timeout = jiffies + msecs_to_jiffies(100);
1531 while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
1532 time_before(jiffies, timeout))
1533 usleep_range(500, 1000);
1534 }
1535 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1536
1537 /* exit link reset */
1538 static void azx_exit_link_reset(struct azx *chip)
1539 {
1540 unsigned long timeout;
1541
1542 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
1543
1544 timeout = jiffies + msecs_to_jiffies(100);
1545 while (!azx_readb(chip, GCTL) &&
1546 time_before(jiffies, timeout))
1547 usleep_range(500, 1000);
1548 }
1549
1550 /* reset codec link */
1551 static int azx_reset(struct azx *chip, bool full_reset)
1552 {
1553 if (!full_reset)
1554 goto __skip;
1555
1556 /* clear STATESTS */
1557 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1558
1559 /* reset controller */
1560 azx_enter_link_reset(chip);
1561
1562 /* delay for >= 100us for codec PLL to settle per spec
1563 * Rev 0.9 section 5.5.1
1564 */
1565 usleep_range(500, 1000);
1566
1567 /* Bring controller out of reset */
1568 azx_exit_link_reset(chip);
1569
1570 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1571 usleep_range(1000, 1200);
1572
1573 __skip:
1574 /* check to see if controller is ready */
1575 if (!azx_readb(chip, GCTL)) {
1576 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1577 return -EBUSY;
1578 }
1579
1580 /* Accept unsolicited responses */
1581 if (!chip->single_cmd)
1582 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1583 AZX_GCTL_UNSOL);
1584
1585 /* detect codecs */
1586 if (!chip->codec_mask) {
1587 chip->codec_mask = azx_readw(chip, STATESTS);
1588 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1589 chip->codec_mask);
1590 }
1591
1592 return 0;
1593 }
1594
1595 /* enable interrupts */
1596 static void azx_int_enable(struct azx *chip)
1597 {
1598 /* enable controller CIE and GIE */
1599 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1600 AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
1601 }
1602
1603 /* disable interrupts */
1604 static void azx_int_disable(struct azx *chip)
1605 {
1606 int i;
1607
1608 /* disable interrupts in stream descriptor */
1609 for (i = 0; i < chip->num_streams; i++) {
1610 struct azx_dev *azx_dev = &chip->azx_dev[i];
1611 azx_sd_writeb(chip, azx_dev, SD_CTL,
1612 azx_sd_readb(chip, azx_dev, SD_CTL) &
1613 ~SD_INT_MASK);
1614 }
1615
1616 /* disable SIE for all streams */
1617 azx_writeb(chip, INTCTL, 0);
1618
1619 /* disable controller CIE and GIE */
1620 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1621 ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
1622 }
1623
1624 /* clear interrupts */
1625 static void azx_int_clear(struct azx *chip)
1626 {
1627 int i;
1628
1629 /* clear stream status */
1630 for (i = 0; i < chip->num_streams; i++) {
1631 struct azx_dev *azx_dev = &chip->azx_dev[i];
1632 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1633 }
1634
1635 /* clear STATESTS */
1636 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1637
1638 /* clear rirb status */
1639 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1640
1641 /* clear int status */
1642 azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
1643 }
1644
1645 /*
1646 * reset and initialize the controller registers
1647 */
1648 void azx_init_chip(struct azx *chip, bool full_reset)
1649 {
1650 if (chip->initialized)
1651 return;
1652
1653 /* reset controller */
1654 azx_reset(chip, full_reset);
1655
1656 /* initialize interrupts */
1657 azx_int_clear(chip);
1658 azx_int_enable(chip);
1659
1660 /* initialize the codec command I/O */
1661 if (!chip->single_cmd)
1662 azx_init_cmd_io(chip);
1663
1664 /* program the position buffer */
1665 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1666 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1667
1668 chip->initialized = 1;
1669 }
1670 EXPORT_SYMBOL_GPL(azx_init_chip);
1671
1672 void azx_stop_chip(struct azx *chip)
1673 {
1674 if (!chip->initialized)
1675 return;
1676
1677 /* disable interrupts */
1678 azx_int_disable(chip);
1679 azx_int_clear(chip);
1680
1681 /* disable CORB/RIRB */
1682 azx_free_cmd_io(chip);
1683
1684 /* disable position buffer */
1685 azx_writel(chip, DPLBASE, 0);
1686 azx_writel(chip, DPUBASE, 0);
1687
1688 chip->initialized = 0;
1689 }
1690 EXPORT_SYMBOL_GPL(azx_stop_chip);
1691
1692 /*
1693 * interrupt handler
1694 */
1695 irqreturn_t azx_interrupt(int irq, void *dev_id)
1696 {
1697 struct azx *chip = dev_id;
1698 struct azx_dev *azx_dev;
1699 u32 status;
1700 u8 sd_status;
1701 int i;
1702
1703 #ifdef CONFIG_PM
1704 if (azx_has_pm_runtime(chip))
1705 if (!pm_runtime_active(chip->card->dev))
1706 return IRQ_NONE;
1707 #endif
1708
1709 spin_lock(&chip->reg_lock);
1710
1711 if (chip->disabled) {
1712 spin_unlock(&chip->reg_lock);
1713 return IRQ_NONE;
1714 }
1715
1716 status = azx_readl(chip, INTSTS);
1717 if (status == 0 || status == 0xffffffff) {
1718 spin_unlock(&chip->reg_lock);
1719 return IRQ_NONE;
1720 }
1721
1722 for (i = 0; i < chip->num_streams; i++) {
1723 azx_dev = &chip->azx_dev[i];
1724 if (status & azx_dev->sd_int_sta_mask) {
1725 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1726 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1727 if (!azx_dev->substream || !azx_dev->running ||
1728 !(sd_status & SD_INT_COMPLETE))
1729 continue;
1730 /* check whether this IRQ is really acceptable */
1731 if (!chip->ops->position_check ||
1732 chip->ops->position_check(chip, azx_dev)) {
1733 spin_unlock(&chip->reg_lock);
1734 snd_pcm_period_elapsed(azx_dev->substream);
1735 spin_lock(&chip->reg_lock);
1736 }
1737 }
1738 }
1739
1740 /* clear rirb int */
1741 status = azx_readb(chip, RIRBSTS);
1742 if (status & RIRB_INT_MASK) {
1743 if (status & RIRB_INT_RESPONSE) {
1744 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1745 udelay(80);
1746 azx_update_rirb(chip);
1747 }
1748 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1749 }
1750
1751 spin_unlock(&chip->reg_lock);
1752
1753 return IRQ_HANDLED;
1754 }
1755 EXPORT_SYMBOL_GPL(azx_interrupt);
1756
1757 /*
1758 * Codec interface
1759 */
1760
1761 /*
1762 * Probe the given codec address
1763 */
1764 static int probe_codec(struct azx *chip, int addr)
1765 {
1766 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1767 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
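/* verb layout used here: [31:28] codec address, [27:20] node ID,
 * [19:8] verb, [7:0] parameter index; azx_command_addr() extracts
 * bits [31:28] again on the send path
 */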
1768 unsigned int res;
1769
1770 mutex_lock(&chip->bus->cmd_mutex);
1771 chip->probing = 1;
1772 azx_send_cmd(chip->bus, cmd);
1773 res = azx_get_response(chip->bus, addr);
1774 chip->probing = 0;
1775 mutex_unlock(&chip->bus->cmd_mutex);
1776 if (res == -1)
1777 return -EIO;
1778 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1779 return 0;
1780 }
1781
1782 static void azx_bus_reset(struct hda_bus *bus)
1783 {
1784 struct azx *chip = bus->private_data;
1785
1786 bus->in_reset = 1;
1787 azx_stop_chip(chip);
1788 azx_init_chip(chip, true);
1789 if (chip->initialized)
1790 snd_hda_bus_reset(chip->bus);
1791 bus->in_reset = 0;
1792 }
1793
1794 static int get_jackpoll_interval(struct azx *chip)
1795 {
1796 int i;
1797 unsigned int j;
1798
1799 if (!chip->jackpoll_ms)
1800 return 0;
1801
1802 i = chip->jackpoll_ms[chip->dev_index];
1803 if (i == 0)
1804 return 0;
1805 if (i < 50 || i > 60000)
1806 j = 0;
1807 else
1808 j = msecs_to_jiffies(i);
1809 if (j == 0)
1810 dev_warn(chip->card->dev,
1811 "jackpoll_ms value out of range: %d\n", i);
1812 return j;
1813 }
1814
1815 static struct hda_bus_ops bus_ops = {
1816 .command = azx_send_cmd,
1817 .get_response = azx_get_response,
1818 .attach_pcm = azx_attach_pcm_stream,
1819 .bus_reset = azx_bus_reset,
1820 #ifdef CONFIG_SND_HDA_DSP_LOADER
1821 .load_dsp_prepare = azx_load_dsp_prepare,
1822 .load_dsp_trigger = azx_load_dsp_trigger,
1823 .load_dsp_cleanup = azx_load_dsp_cleanup,
1824 #endif
1825 };
1826
1827 /* HD-audio bus initialization */
1828 int azx_bus_create(struct azx *chip, const char *model)
1829 {
1830 struct hda_bus *bus;
1831 int err;
1832
1833 err = snd_hda_bus_new(chip->card, &bus);
1834 if (err < 0)
1835 return err;
1836
1837 chip->bus = bus;
1838 bus->private_data = chip;
1839 bus->pci = chip->pci;
1840 bus->modelname = model;
1841 bus->ops = bus_ops;
1842
1843 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1844 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1845 bus->needs_damn_long_delay = 1;
1846 }
1847
1848 /* AMD chipsets often cause communication stalls upon certain
1849 * sequences such as pin detection. It seems that forcing synced
1850 * access works around the stall. Grrr...
1851 */
1852 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1853 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1854 bus->sync_write = 1;
1855 bus->allow_bus_reset = 1;
1856 }
1857
1858 return 0;
1859 }
1860 EXPORT_SYMBOL_GPL(azx_bus_create);
1861
1862 /* Probe codecs */
1863 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1864 {
1865 struct hda_bus *bus = chip->bus;
1866 int c, codecs, err;
1867
1868 codecs = 0;
1869 if (!max_slots)
1870 max_slots = AZX_DEFAULT_CODECS;
1871
1872 /* First try to probe all given codec slots */
1873 for (c = 0; c < max_slots; c++) {
1874 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1875 if (probe_codec(chip, c) < 0) {
1876 /* Some BIOSes report codec addresses
1877 * that don't exist
1878 */
1879 dev_warn(chip->card->dev,
1880 "Codec #%d probe error; disabling it...\n", c);
1881 chip->codec_mask &= ~(1 << c);
1882 /* Worse, accessing a non-existing codec
1883 * often screws up the controller chip and
1884 * disturbs further communication.
1885 * Thus, if an error occurs during probing,
1886 * it's better to reset the controller chip
1887 * to get back to a sane state.
1888 */
1889 azx_stop_chip(chip);
1890 azx_init_chip(chip, true);
1891 }
1892 }
1893 }
1894
1895 /* Then create codec instances */
1896 for (c = 0; c < max_slots; c++) {
1897 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1898 struct hda_codec *codec;
1899 err = snd_hda_codec_new(bus, bus->card, c, &codec);
1900 if (err < 0)
1901 continue;
1902 codec->jackpoll_interval = get_jackpoll_interval(chip);
1903 codec->beep_mode = chip->beep_mode;
1904 codecs++;
1905 }
1906 }
1907 if (!codecs) {
1908 dev_err(chip->card->dev, "no codecs initialized\n");
1909 return -ENXIO;
1910 }
1911 return 0;
1912 }
1913 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1914
1915 /* configure each codec instance */
1916 int azx_codec_configure(struct azx *chip)
1917 {
1918 struct hda_codec *codec;
1919 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1920 snd_hda_codec_configure(codec);
1921 }
1922 return 0;
1923 }
1924 EXPORT_SYMBOL_GPL(azx_codec_configure);
1925
1926
1927 static bool is_input_stream(struct azx *chip, unsigned char index)
1928 {
1929 return (index >= chip->capture_index_offset &&
1930 index < chip->capture_index_offset + chip->capture_streams);
1931 }
1932
1933 /* initialize SD streams */
1934 int azx_init_stream(struct azx *chip)
1935 {
1936 int i;
1937 int in_stream_tag = 0;
1938 int out_stream_tag = 0;
1939
1940 /* initialize each stream (aka device):
1941 * assign the starting BDL address to each stream (device)
1942 * and initialize its parameters
1943 */
1944 for (i = 0; i < chip->num_streams; i++) {
1945 struct azx_dev *azx_dev = &chip->azx_dev[i];
1946 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1947 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1948 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1949 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1950 azx_dev->sd_int_sta_mask = 1 << i;
1951 azx_dev->index = i;
1952
1953 /* the stream tag must be unique throughout
1954 * the stream direction group;
1955 * valid values are 1...15.
1956 * Separate tag ranges are used if the flag
1957 * AZX_DCAPS_SEPARATE_STREAM_TAG is set
1958 */
1959 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1960 azx_dev->stream_tag =
1961 is_input_stream(chip, i) ?
1962 ++in_stream_tag :
1963 ++out_stream_tag;
1964 else
1965 azx_dev->stream_tag = i + 1;
1966 }
1967
1968 return 0;
1969 }
1970 EXPORT_SYMBOL_GPL(azx_init_stream);
1971
1972 /*
1973 * reboot notifier for hang-up problem at power-down
1974 */
1975 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1976 {
1977 struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1978 snd_hda_bus_reboot_notify(chip->bus);
1979 azx_stop_chip(chip);
1980 return NOTIFY_OK;
1981 }
1982
1983 void azx_notifier_register(struct azx *chip)
1984 {
1985 chip->reboot_notifier.notifier_call = azx_halt;
1986 register_reboot_notifier(&chip->reboot_notifier);
1987 }
1988 EXPORT_SYMBOL_GPL(azx_notifier_register);
1989
1990 void azx_notifier_unregister(struct azx *chip)
1991 {
1992 if (chip->reboot_notifier.notifier_call)
1993 unregister_reboot_notifier(&chip->reboot_notifier);
1994 }
1995 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1996
1997 MODULE_LICENSE("GPL");
1998 MODULE_DESCRIPTION("Common HDA driver functions");