/*
 *  Implementation of the primary ALSA driver code base for Intel HD Audio.
 *
 *  Copyright(c) 2004 Intel Corporation. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; either version 2 of the License, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 */

#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#ifdef CONFIG_X86
/* for art-tsc conversion */
#include <asm/tsc.h>
#endif

#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"

#define CREATE_TRACE_POINTS
#include "hda_controller_trace.h"

/* DSP lock helpers */
#define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
#define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
#define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
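/*
 * These wrappers map onto the snd_hdac_dsp_*() helpers: dsp_lock() takes the
 * stream's DSP mutex, and dsp_is_locked() tells whether the stream is
 * currently reserved by the DSP loader (see the CONFIG_SND_HDA_DSP_LOADER
 * code near the end of this file), so the PCM callbacks below can skip
 * touching the hardware in that case.
 */
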
/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	struct hdac_stream *s;

	s = snd_hdac_stream_assign(azx_bus(chip), substream);
	if (!s)
		return NULL;
	return stream_to_azx_dev(s);
}

/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	snd_hdac_stream_release(azx_stream(azx_dev));
}

static inline struct hda_pcm_stream *
to_hda_pcm_stream(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	return &apcm->info->stream[substream->stream];
}

static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
				  u64 nsec)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	u64 codec_frames, codec_nsecs;

	if (!hinfo->ops.get_delay)
		return nsec;

	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
	codec_nsecs = div_u64(codec_frames * 1000000000LL,
			      substream->runtime->rate);

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return nsec + codec_nsecs;

	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}
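
/*
 * Worked example (illustrative): a codec reporting a delay of 64 frames on a
 * 48000 Hz stream contributes 64 * 10^9 / 48000 ~= 1333333 ns (about 1.33 ms),
 * which is added to capture timestamps and subtracted from playback ones.
 */
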
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);

	trace_azx_pcm_close(chip, azx_dev);
	mutex_lock(&chip->open_mutex);
	azx_release_device(azx_dev);
	if (hinfo->ops.close)
		hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return 0;
}

static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	int ret;

	trace_azx_pcm_hw_params(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		ret = -EBUSY;
		goto unlock;
	}

	azx_dev->core.bufsize = 0;
	azx_dev->core.period_bytes = 0;
	azx_dev->core.format_val = 0;
	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));

unlock:
	dsp_unlock(azx_dev);
	return ret;
}

static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev))
		snd_hdac_stream_cleanup(azx_stream(azx_dev));

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = snd_pcm_lib_free_pages(substream);
	azx_stream(azx_dev)->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}

static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	trace_azx_pcm_prepare(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	snd_hdac_stream_reset(azx_stream(azx_dev));
	format_val = snd_hdac_calc_stream_format(runtime->rate,
						 runtime->channels,
						 runtime->format,
						 hinfo->maxbps,
						 ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
	if (err < 0)
		goto unlock;

	snd_hdac_stream_setup(azx_stream(azx_dev));

	stream_tag = azx_dev->core.stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->core.format_val, substream);

 unlock:
	if (!err)
		azx_stream(azx_dev)->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}

static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct hdac_bus *bus = azx_bus(chip);
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	struct hdac_stream *hstr;
	bool start;
	int sbits = 0;
	int sync_reg;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	hstr = azx_stream(azx_dev);
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		sync_reg = AZX_REG_OLD_SSYNC;
	else
		sync_reg = AZX_REG_SSYNC;

	if (dsp_is_locked(azx_dev) || !hstr->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = true;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = false;
		break;
	default:
		return -EINVAL;
	}

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->core.index;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&bus->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->insufficient = 1;
			snd_hdac_stream_start(azx_stream(azx_dev), true);
		} else {
			snd_hdac_stream_stop(azx_stream(azx_dev));
		}
	}
	spin_unlock(&bus->reg_lock);

	snd_hdac_stream_sync(hstr, start, sbits);

	spin_lock(&bus->reg_lock);
	/* reset SYNC bits */
	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
	if (start)
		snd_hdac_stream_timecounter_init(hstr, sbits);
	spin_unlock(&bus->reg_lock);
	return 0;
}

unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_lpib);

unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
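
/*
 * Note on the two position sources: azx_get_pos_lpib() reads the stream's
 * Link Position in Buffer (LPIB) register, while azx_get_pos_posbuf() reads
 * the DMA position buffer that the controller updates in host memory.  Which
 * of the two is more trustworthy is chipset dependent, hence the
 * per-direction chip->get_position[] hooks consulted below.
 */
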
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev)
{
	struct snd_pcm_substream *substream = azx_dev->core.substream;
	unsigned int pos;
	int stream = substream->stream;
	int delay = 0;

	if (chip->get_position[stream])
		pos = chip->get_position[stream](chip, azx_dev);
	else /* use the position buffer as default */
		pos = azx_get_pos_posbuf(chip, azx_dev);

	if (pos >= azx_dev->core.bufsize)
		pos = 0;

	if (substream->runtime) {
		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

		if (chip->get_delay[stream])
			delay += chip->get_delay[stream](chip, azx_dev, pos);
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);

static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	return bytes_to_frames(substream->runtime,
			       azx_get_position(chip, azx_dev));
}

/*
 * azx_scale64: scale base by num/den without overflowing too early
 *
 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
 *
 * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K,
 * which is about 384307 seconds, i.e. ~4.5 days.
 *
 * This scales the calculation so that overflow will happen but after 2^64 /
 * 48000 secs, which is pretty large!
 *
 * In the calculation below:
 *	base may overflow, but since there isn't any additional division
 *	performed on base it's OK
 *	rem can't overflow because both operands are 32-bit values
 */
static u64 azx_scale64(u64 base, u32 num, u32 den)
{
	u64 rem;

	rem = do_div(base, den);

	base *= num;
	rem *= num;

	do_div(rem, den);

	return base + rem;
}
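
/*
 * Worked numbers (illustrative): multiplying the link counter by 10^9 before
 * dividing would overflow 64 bits once the counter exceeds 2^64 / 10^9, i.e.
 * roughly 1.8e10 frames, which a 48 kHz stream reaches after ~384307 s
 * (~4.5 days).  With the split multiply/divide above, overflow is pushed out
 * to about 2^64 / 48000 seconds, i.e. millions of years.
 */
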
#ifdef CONFIG_X86
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	struct snd_pcm_substream *substream = ctx;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct snd_pcm_runtime *runtime;
	u64 ll_counter, ll_counter_l, ll_counter_h;
	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
	u32 wallclk_ctr, wallclk_cycles;
	bool direction;
	u32 dma_select;
	u32 timeout;
	u32 retry_count = 0;

	runtime = substream->runtime;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		direction = 1;
	else
		direction = 0;

	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
	do {
		timeout = 100;
		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
			     (azx_dev->core.stream_tag - 1);
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);

		/* Enable the capture */
		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);

		while (timeout) {
			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
			    GTSCC_TSCCD_MASK)
				break;

			timeout--;
		}

		if (!timeout) {
			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
			return -EIO;
		}

		/* Read wall clock counter */
		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);

		/* Read TSC counter */
		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);

		/* Read Link counter */
		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);

		/* Ack: registers read done */
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);

		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
			      tsc_counter_l;

		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;

		/*
		 * An error can occur near a frame "rollover".  The
		 * cycles-in-frame value indicates whether this error may
		 * have occurred.  Here we use a margin of 10 cycles, i.e.
		 * HDA_MAX_CYCLE_OFFSET.
		 */
		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
		    && wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
			break;

		/*
		 * Sleep before we read again, else we may again get a
		 * value near MAX_CYCLE.  Sleep for a different amount of
		 * time on each retry so we don't hit the same number again.
		 */
		udelay(retry_count++);

	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);

	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
		dev_err_ratelimited(chip->card->dev,
				    "Error in WALFCC cycle count\n");
		return -EIO;
	}

	*device = ns_to_ktime(azx_scale64(ll_counter,
					  NSEC_PER_SEC, runtime->rate));
	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));

	*system = convert_art_to_tsc(tsc_counter);

	return 0;
}
#else
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	return -ENXIO;
}
#endif

static int azx_get_crosststamp(struct snd_pcm_substream *substream,
			       struct system_device_crosststamp *xtstamp)
{
	return get_device_system_crosststamp(azx_get_sync_time,
					     substream, NULL, xtstamp);
}

static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
					  struct snd_pcm_audio_tstamp_config *ts)
{
	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
			return true;

	return false;
}

static int azx_get_time_info(struct snd_pcm_substream *substream,
			     struct timespec *system_ts, struct timespec *audio_ts,
			     struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			     struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct system_device_crosststamp xtstamp;
	int ret;
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
	    (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		nsec = timecounter_read(&azx_dev->core.tc);
		nsec = div_u64(nsec, 3); /* can be optimized */
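		/*
		 * The divide by 3 pairs with the wall-clock timecounter setup
		 * in sound/hda/hdac_stream.c, which uses mult = 125, shift = 0:
		 * the value read above is cycles * 125, and 125/3 ~= 41.67 ns
		 * per cycle of the 24 MHz wall clock.
		 */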
		if (audio_tstamp_config->report_delay)
			nsec = azx_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {

		ret = azx_get_crosststamp(substream, &xtstamp);
		if (ret)
			return ret;

		switch (runtime->tstamp_type) {
		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
			return -EINVAL;

		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
			*system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
			break;

		default:
			*system_ts = ktime_to_timespec(xtstamp.sys_realtime);
			break;
		}

		*audio_ts = ktime_to_timespec(xtstamp.device);

		audio_tstamp_report->actual_type =
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
		audio_tstamp_report->accuracy_report = 1;
		/* 24 MHz WallClock == 42ns resolution */
		audio_tstamp_report->accuracy = 42;

	} else {
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
	}

	return 0;
}

static struct snd_pcm_hardware azx_pcm_hw = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 /* No full-resume yet implemented */
		 /* SNDRV_PCM_INFO_RESUME |*/
		 SNDRV_PCM_INFO_PAUSE |
		 SNDRV_PCM_INFO_SYNC_START |
		 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
		 SNDRV_PCM_INFO_HAS_LINK_ATIME |
		 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = AZX_MAX_BUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = AZX_MAX_BUF_SIZE / 2,
	.periods_min = 2,
	.periods_max = AZX_MAX_FRAG,
	.fifo_size = 0,
};

static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	int buff_step;

	snd_hda_codec_pcm_get(apcm->info);
	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	trace_azx_pcm_open(chip, azx_dev);
	if (azx_dev == NULL) {
		err = -EBUSY;
		goto unlock;
	}
	runtime->private_data = azx_dev;

	if (chip->gts_present)
		azx_pcm_hw.info = azx_pcm_hw.info |
			SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;

	runtime->hw = azx_pcm_hw;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be a multiple of 128
		   bytes.  This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes.  For example, at 44.1 kHz a period size set
		   to 20 ms will be rounded to 19.59 ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes; they still need
		   to be a multiple of 4 bytes (HDA spec).  Tested on
		   Intel HDA controllers, may not work on all devices
		   where the option needs to be disabled. */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
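	/*
	 * Worked example for the 128-byte step (illustrative): 44.1 kHz,
	 * 16-bit stereo is 176400 bytes/s, so a requested 20 ms period is
	 * 3528 bytes, which is not a multiple of 128; it ends up as
	 * 3456 bytes, i.e. ~19.59 ms, the figure quoted in the comment above.
	 */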
	snd_hda_power_up(apcm->codec);
	if (hinfo->ops.open)
		err = hinfo->ops.open(hinfo, apcm->codec, substream);
	else
		err = -ENODEV;
	if (err < 0) {
		azx_release_device(azx_dev);
		goto powerdown;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		if (hinfo->ops.close)
			hinfo->ops.close(hinfo, apcm->codec, substream);
		err = -EINVAL;
		goto powerdown;
	}

	/* disable LINK_ATIME timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
	}

	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;

 powerdown:
	snd_hda_power_down(apcm->codec);
 unlock:
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return err;
}

static int azx_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *area)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	if (chip->ops->pcm_mmap_prepare)
		chip->ops->pcm_mmap_prepare(substream, area);
	return snd_pcm_lib_default_mmap(substream, area);
}

static const struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.get_time_info = azx_get_time_info,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};

static void azx_pcm_free(struct snd_pcm *pcm)
{
	struct azx_pcm *apcm = pcm->private_data;
	if (apcm) {
		list_del(&apcm->list);
		apcm->info->pcm = NULL;
		kfree(apcm);
	}
}

#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)

int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
			      struct hda_pcm *cpcm)
{
	struct hdac_bus *bus = &_bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;
	int type = SNDRV_DMA_TYPE_DEV_SG;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL) {
		snd_device_free(chip->card, pcm);
		return -ENOMEM;
	}
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	apcm->info = cpcm;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	if (chip->uc_buffer)
		type = SNDRV_DMA_TYPE_DEV_UC_SG;
	snd_pcm_lib_preallocate_pages_for_all(pcm, type,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	return 0;
}

static unsigned int azx_command_addr(u32 cmd)
{
	unsigned int addr = cmd >> 28;

	if (addr >= AZX_MAX_CODECS) {
		snd_BUG();
		addr = 0;
	}

	return addr;
}

/* receive a response */
static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
				 unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);
	struct hda_bus *hbus = &chip->bus;
	unsigned long timeout;
	unsigned long loopcounter;
	int do_poll = 0;

 again:
	timeout = jiffies + msecs_to_jiffies(1000);

	for (loopcounter = 0;; loopcounter++) {
		spin_lock_irq(&bus->reg_lock);
		if (chip->polling_mode || do_poll)
			snd_hdac_bus_update_rirb(bus);
		if (!bus->rirb.cmds[addr]) {
			if (!do_poll)
				chip->poll_count = 0;
			if (res)
				*res = bus->rirb.res[addr]; /* the last value */
			spin_unlock_irq(&bus->reg_lock);
			return 0;
		}
		spin_unlock_irq(&bus->reg_lock);
		if (time_after(jiffies, timeout))
			break;
		if (hbus->needs_damn_long_delay || loopcounter > 3000)
			msleep(2); /* temporary workaround */
		else {
			udelay(10);
			cond_resched();
		}
	}

	if (hbus->no_response_fallback)
		return -EIO;

	if (!chip->polling_mode && chip->poll_count < 2) {
		dev_dbg(chip->card->dev,
			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
			bus->last_cmd[addr]);
		do_poll = 1;
		chip->poll_count++;
		goto again;
	}

	if (!chip->polling_mode) {
		dev_warn(chip->card->dev,
			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		chip->polling_mode = 1;
		goto again;
	}

	if (chip->msi) {
		dev_warn(chip->card->dev,
			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		if (chip->ops->disable_msi_reset_irq &&
		    chip->ops->disable_msi_reset_irq(chip) < 0)
			return -EIO;
		goto again;
	}

	if (chip->probing) {
		/* If this critical timeout happens during the codec probing
		 * phase, this is likely an access to a non-existing codec
		 * slot.  Better to return an error and reset the system.
		 */
		return -EIO;
	}

	/* no fallback mechanism? */
	if (!chip->fallback_to_single_cmd)
		return -EIO;

	/* a fatal communication error; need either to reset or to fallback
	 * to the single_cmd mode
	 */
	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
		hbus->response_reset = 1;
		return -EAGAIN; /* give a chance to retry */
	}

	dev_err(chip->card->dev,
		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
		bus->last_cmd[addr]);
	chip->single_cmd = 1;
	hbus->response_reset = 0;
	snd_hdac_bus_stop_cmd_io(bus);
	return -EIO;
}

/*
 * Use the single immediate command instead of CORB/RIRB for simplicity.
 *
 * Note: according to Intel, this is not the preferred use.  The command was
 *       intended for the BIOS only, and may get confused with unsolicited
 *       responses.  So we shouldn't use it for normal operation from the
 *       driver.
 *       The code is left here, however, for debugging/testing purposes.
 */
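
/*
 * Sketch of the immediate-command handshake implemented below: wait for the
 * ICB (busy) bit in IRS to clear, clear any stale IRV (valid) bit, write the
 * verb to IC, set ICB to start the command, then poll IRV and read the
 * response from IR.
 */
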
/* receive a response */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
	int timeout = 50;

	while (timeout--) {
		/* check IRV busy bit */
		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
			/* reuse rirb.res as the response return value */
			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
			return 0;
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
			azx_readw(chip, IRS));
	azx_bus(chip)->rirb.res[addr] = -1;
	return -EIO;
}

/* send a command */
static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
{
	struct azx *chip = bus_to_azx(bus);
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	bus->last_cmd[azx_command_addr(val)] = val;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
			/* Clear IRV valid bit */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_VALID);
			azx_writel(chip, IC, val);
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}

/* receive a response */
static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
				   unsigned int *res)
{
	if (res)
		*res = bus->rirb.res[addr];
	return 0;
}

/*
 * The below are the main callbacks from hda_codec.
 *
 * They are just the skeleton to call sub-callbacks according to the
 * current setting of chip->single_cmd.
 */

/* send a command */
static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
{
	struct azx *chip = bus_to_azx(bus);

	if (chip->disabled)
		return 0;
	if (chip->single_cmd)
		return azx_single_send_cmd(bus, val);
	else
		return snd_hdac_bus_send_cmd(bus, val);
}

/* get a response */
static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
			    unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);

	if (chip->disabled)
		return 0;
	if (chip->single_cmd)
		return azx_single_get_response(bus, addr, res);
	else
		return azx_rirb_get_response(bus, addr, res);
}

static const struct hdac_bus_ops bus_core_ops = {
	.command = azx_send_cmd,
	.get_response = azx_get_response,
};

#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
 * DSP loading code (e.g. for CA0132)
 */

/* use the first stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		if (s->index == chip->playback_index_offset)
			return stream_to_azx_dev(s);

	return NULL;
}

int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
				   unsigned int byte_size,
				   struct snd_dma_buffer *bufp)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev;
	struct hdac_stream *hstr;
	bool saved = false;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);
	hstr = azx_stream(azx_dev);
	spin_lock_irq(&bus->reg_lock);
	if (hstr->opened) {
		chip->saved_azx_dev = *azx_dev;
		saved = true;
	}
	spin_unlock_irq(&bus->reg_lock);

	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
	if (err < 0) {
		spin_lock_irq(&bus->reg_lock);
		if (saved)
			*azx_dev = chip->saved_azx_dev;
		spin_unlock_irq(&bus->reg_lock);
		return err;
	}

	hstr->prepared = 0;
	return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);

void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);

void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
				    struct snd_dma_buffer *dmab)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
	struct hdac_stream *hstr = azx_stream(azx_dev);

	if (!dmab->area || !hstr->locked)
		return;

	snd_hdac_dsp_cleanup(hstr, dmab);
	spin_lock_irq(&bus->reg_lock);
	if (hstr->opened)
		*azx_dev = chip->saved_azx_dev;
	hstr->locked = false;
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
#endif /* CONFIG_SND_HDA_DSP_LOADER */

/*
 * reset and start the controller registers
 */
void azx_init_chip(struct azx *chip, bool full_reset)
{
	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
		/* correct RINTCNT for CXT */
		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
			azx_writew(chip, RINTCNT, 0xc0);
	}
}
EXPORT_SYMBOL_GPL(azx_init_chip);

void azx_stop_all_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		snd_hdac_stream_stop(s);
}
EXPORT_SYMBOL_GPL(azx_stop_all_streams);

void azx_stop_chip(struct azx *chip)
{
	snd_hdac_bus_stop_chip(azx_bus(chip));
}
EXPORT_SYMBOL_GPL(azx_stop_chip);

/*
 * interrupt handler
 */
static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
{
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = stream_to_azx_dev(s);

	/* check whether this IRQ is really acceptable */
	if (!chip->ops->position_check ||
	    chip->ops->position_check(chip, azx_dev)) {
		spin_unlock(&bus->reg_lock);
		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
		spin_lock(&bus->reg_lock);
	}
}

irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct hdac_bus *bus = azx_bus(chip);
	u32 status;
	bool active, handled = false;
	int repeat = 0; /* count for avoiding endless loop */

#ifdef CONFIG_PM
	if (azx_has_pm_runtime(chip))
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;
#endif

	spin_lock(&bus->reg_lock);

	if (chip->disabled)
		goto unlock;

	do {
		status = azx_readl(chip, INTSTS);
		if (status == 0 || status == 0xffffffff)
			break;

		handled = true;
		active = false;
		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
			active = true;

		/* clear rirb int */
		status = azx_readb(chip, RIRBSTS);
		if (status & RIRB_INT_MASK) {
			active = true;
			if (status & RIRB_INT_RESPONSE) {
				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
					udelay(80);
				snd_hdac_bus_update_rirb(bus);
			}
			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
		}
	} while (active && ++repeat < 10);

 unlock:
	spin_unlock(&bus->reg_lock);

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);

/*
 * Codec interface
 */

/*
 * Probe the given codec address
 */
static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
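	/*
	 * HDA verb layout used above: bits 31:28 carry the codec address,
	 * bits 27:20 the node ID (here AC_NODE_ROOT), and bits 19:0 the verb
	 * plus payload (here the GET_PARAMETER verb asking for the vendor ID).
	 */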
	struct hdac_bus *bus = azx_bus(chip);
	int err;
	unsigned int res = -1;

	mutex_lock(&bus->cmd_mutex);
	chip->probing = 1;
	azx_send_cmd(bus, cmd);
	err = azx_get_response(bus, addr, &res);
	chip->probing = 0;
	mutex_unlock(&bus->cmd_mutex);
	if (err < 0 || res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}

void snd_hda_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus_to_azx(&bus->core);

	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (bus->core.chip_init)
		snd_hda_bus_reset_codecs(bus);
	bus->in_reset = 0;
}

/* HD-audio bus initialization */
int azx_bus_init(struct azx *chip, const char *model,
		 const struct hdac_io_ops *io_ops)
{
	struct hda_bus *bus = &chip->bus;
	int err;

	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops,
				io_ops);
	if (err < 0)
		return err;

	bus->card = chip->card;
	mutex_init(&bus->prepare_mutex);
	bus->pci = chip->pci;
	bus->modelname = model;
	bus->mixer_assigned = -1;
	bus->core.snoop = azx_snoop(chip);
	if (chip->get_position[0] != azx_get_pos_lpib ||
	    chip->get_position[1] != azx_get_pos_lpib)
		bus->core.use_posbuf = true;
	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
		bus->core.corbrp_self_clear = true;

	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
		bus->core.align_bdle_4k = true;

	/* AMD chipsets often cause communication stalls upon certain
	 * sequences such as pin detection.  It seems that forcing synced
	 * access works around the stall.  Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		bus->core.sync_write = 1;
		bus->allow_bus_reset = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_init);

/* Probe codecs */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
	struct hdac_bus *bus = azx_bus(chip);
	int c, codecs, err;

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSes give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				bus->codec_mask &= ~(1 << c);
				/* Worse, accessing a non-existing codec often
				 * screws up the controller chip and disturbs
				 * further communication.  Thus if an error
				 * occurs during probing, it is better to
				 * reset the controller chip to get back to a
				 * sane state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = chip->jackpoll_interval;
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);

/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
	struct hda_codec *codec, *next;

	/* use the _safe version here, since snd_hda_codec_configure()
	 * deregisters the device upon error and deletes itself from the
	 * bus list.
	 */
	list_for_each_codec_safe(codec, next, &chip->bus) {
		snd_hda_codec_configure(codec);
	}

	if (!azx_bus(chip)->num_codecs)
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);

static int stream_direction(struct azx *chip, unsigned char index)
{
	if (index >= chip->capture_index_offset &&
	    index < chip->capture_index_offset + chip->capture_streams)
		return SNDRV_PCM_STREAM_CAPTURE;
	return SNDRV_PCM_STREAM_PLAYBACK;
}

/* initialize SD streams */
int azx_init_streams(struct azx *chip)
{
	int i;
	int stream_tags[2] = { 0, 0 };

	/* initialize each stream (aka device):
	 * assign the starting bdl address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
		int dir, tag;

		if (!azx_dev)
			return -ENOMEM;

		dir = stream_direction(chip, i);
		/* stream tags must be unique within a stream direction
		 * group; valid values are 1...15.
		 * Use separate per-direction stream tags if the
		 * AZX_DCAPS_SEPARATE_STREAM_TAG flag is set.
		 */
		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
			tag = ++stream_tags[dir];
		else
			tag = i + 1;
		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
				     i, dir, tag);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_streams);

void azx_free_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	while (!list_empty(&bus->stream_list)) {
		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
		list_del(&s->list);
		kfree(stream_to_azx_dev(s));
	}
}
EXPORT_SYMBOL_GPL(azx_free_streams);