// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Digital Audio (PCM) abstract layer
 */
8 #include <linux/slab.h>
9 #include <linux/sched/signal.h>
10 #include <linux/time.h>
11 #include <linux/math64.h>
12 #include <linux/export.h>
13 #include <sound/core.h>
14 #include <sound/control.h>
15 #include <sound/tlv.h>
16 #include <sound/info.h>
17 #include <sound/pcm.h>
18 #include <sound/pcm_params.h>
19 #include <sound/timer.h>
21 #include "pcm_local.h"
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#define trace_applptr(substream, prev, curr)
#endif
static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;
	int err;

	if (runtime->silence_size < runtime->boundary) {
		snd_pcm_sframes_t noise_dist, n;
		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		if (runtime->silence_start != appl_ptr) {
			n = appl_ptr - runtime->silence_start;
			if (n < 0)
				n += runtime->boundary;
			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
				runtime->silence_filled -= n;
			else
				runtime->silence_filled = 0;
			runtime->silence_start = appl_ptr;
		}
		if (runtime->silence_filled >= runtime->buffer_size)
			return;
		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
			return;
		frames = runtime->silence_threshold - noise_dist;
		if (frames > runtime->silence_size)
			frames = runtime->silence_size;
	} else {
		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
			if (avail > runtime->buffer_size)
				avail = runtime->buffer_size;
			runtime->silence_filled = avail > 0 ? avail : 0;
			runtime->silence_start = (runtime->status->hw_ptr +
						  runtime->silence_filled) %
						 runtime->boundary;
		} else {
			ofs = runtime->status->hw_ptr;
			frames = new_hw_ptr - ofs;
			if ((snd_pcm_sframes_t)frames < 0)
				frames += runtime->boundary;
			runtime->silence_filled -= frames;
			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
				runtime->silence_filled = 0;
				runtime->silence_start = new_hw_ptr;
			} else {
				runtime->silence_start = ofs;
			}
		}
		frames = runtime->buffer_size - runtime->silence_filled;
	}
	if (snd_BUG_ON(frames > runtime->buffer_size))
		return;
	if (frames == 0)
		return;
	ofs = runtime->silence_start % runtime->buffer_size;
	do {
		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
		err = fill_silence_frames(substream, ofs, transfer);
		snd_BUG_ON(err < 0);
		runtime->silence_filled += transfer;
		frames -= transfer;
		ofs = 0;
	} while (frames > 0);
	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
}
#ifdef CONFIG_SND_DEBUG
void snd_pcm_debug_name(struct snd_pcm_substream *substream,
			char *name, size_t len)
{
	snprintf(name, len, "pcmC%dD%d%c:%d",
		 substream->pcm->card->number,
		 substream->pcm->device,
		 substream->stream ? 'c' : 'p',
		 substream->number);
}
EXPORT_SYMBOL(snd_pcm_debug_name);
#endif
#define XRUN_DEBUG_BASIC	(1<<0)
#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */

#ifdef CONFIG_SND_PCM_XRUN_DEBUG

#define xrun_debug(substream, mask) \
			((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask)	0
#endif

#define dump_stack_on_xrun(substream) do {			\
		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
			dump_stack();				\
	} while (0)

/* call with stream lock held */
void __snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	trace_xrun(substream);
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
		struct timespec64 tstamp;

		snd_pcm_gettime(runtime, &tstamp);
		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
	}
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
		char name[16];
		snd_pcm_debug_name(substream, name, sizeof(name));
		pcm_warn(substream->pcm, "XRUN: %s\n", name);
		dump_stack_on_xrun(substream);
	}
}
#ifdef CONFIG_SND_PCM_XRUN_DEBUG

#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
	do {								\
		trace_hw_ptr_error(substream, reason);			\
		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
					   (in_interrupt) ? 'Q' : 'P', ##args);	\
			dump_stack_on_xrun(substream);			\
		}							\
	} while (0)

#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */

#define hw_ptr_error(substream, fmt, args...) do { } while (0)

#endif
int snd_pcm_update_state(struct snd_pcm_substream *substream,
			 struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t avail;

	avail = snd_pcm_avail(substream);
	if (avail > runtime->avail_max)
		runtime->avail_max = avail;
	if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
		if (avail >= runtime->buffer_size) {
			snd_pcm_drain_done(substream);
			return -EPIPE;
		}
	} else {
		if (avail >= runtime->stop_threshold) {
			__snd_pcm_xrun(substream);
			return -EPIPE;
		}
	}
	if (runtime->twake) {
		if (avail >= runtime->twake)
			wake_up(&runtime->tsleep);
	} else if (avail >= runtime->control->avail_min)
		wake_up(&runtime->sleep);
	return 0;
}
208 static void update_audio_tstamp(struct snd_pcm_substream *substream,
209 struct timespec64 *curr_tstamp,
210 struct timespec64 *audio_tstamp)
212 struct snd_pcm_runtime *runtime = substream->runtime;
213 u64 audio_frames, audio_nsecs;
214 struct timespec64 driver_tstamp;
216 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
219 if (!(substream->ops->get_time_info) ||
220 (runtime->audio_tstamp_report.actual_type ==
221 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
224 * provide audio timestamp derived from pointer position
225 * add delay only if requested
228 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
230 if (runtime->audio_tstamp_config.report_delay) {
231 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
232 audio_frames -= runtime->delay;
234 audio_frames += runtime->delay;
236 audio_nsecs = div_u64(audio_frames * 1000000000LL,
238 *audio_tstamp = ns_to_timespec64(audio_nsecs);
241 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
242 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
243 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
244 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
245 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
246 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
251 * re-take a driver timestamp to let apps detect if the reference tstamp
252 * read by low-level hardware was provided with a delay
254 snd_pcm_gettime(substream->runtime, &driver_tstamp);
255 runtime->driver_tstamp = driver_tstamp;
258 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
259 unsigned int in_interrupt)
261 struct snd_pcm_runtime *runtime = substream->runtime;
262 snd_pcm_uframes_t pos;
263 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
264 snd_pcm_sframes_t hdelta, delta;
265 unsigned long jdelta;
266 unsigned long curr_jiffies;
267 struct timespec64 curr_tstamp;
268 struct timespec64 audio_tstamp;
269 int crossed_boundary = 0;
271 old_hw_ptr = runtime->status->hw_ptr;
274 * group pointer, time and jiffies reads to allow for more
275 * accurate correlations/corrections.
276 * The values are stored at the end of this routine after
277 * corrections for hw_ptr position
279 pos = substream->ops->pointer(substream);
280 curr_jiffies = jiffies;
281 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
282 if ((substream->ops->get_time_info) &&
283 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
284 substream->ops->get_time_info(substream, &curr_tstamp,
286 &runtime->audio_tstamp_config,
287 &runtime->audio_tstamp_report);
289 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
290 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
291 snd_pcm_gettime(runtime, &curr_tstamp);
293 snd_pcm_gettime(runtime, &curr_tstamp);
296 if (pos == SNDRV_PCM_POS_XRUN) {
297 __snd_pcm_xrun(substream);
300 if (pos >= runtime->buffer_size) {
301 if (printk_ratelimit()) {
303 snd_pcm_debug_name(substream, name, sizeof(name));
304 pcm_err(substream->pcm,
305 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
306 name, pos, runtime->buffer_size,
307 runtime->period_size);
311 pos -= pos % runtime->min_align;
312 trace_hwptr(substream, pos, in_interrupt);
313 hw_base = runtime->hw_ptr_base;
314 new_hw_ptr = hw_base + pos;
316 /* we know that one period was processed */
317 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
318 delta = runtime->hw_ptr_interrupt + runtime->period_size;
319 if (delta > new_hw_ptr) {
320 /* check for double acknowledged interrupts */
321 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
322 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
323 hw_base += runtime->buffer_size;
324 if (hw_base >= runtime->boundary) {
328 new_hw_ptr = hw_base + pos;
333 /* new_hw_ptr might be lower than old_hw_ptr in case when */
334 /* pointer crosses the end of the ring buffer */
335 if (new_hw_ptr < old_hw_ptr) {
336 hw_base += runtime->buffer_size;
337 if (hw_base >= runtime->boundary) {
341 new_hw_ptr = hw_base + pos;
344 delta = new_hw_ptr - old_hw_ptr;
346 delta += runtime->boundary;
348 if (runtime->no_period_wakeup) {
349 snd_pcm_sframes_t xrun_threshold;
351 * Without regular period interrupts, we have to check
352 * the elapsed time to detect xruns.
354 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
355 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
357 hdelta = jdelta - delta * HZ / runtime->rate;
358 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
359 while (hdelta > xrun_threshold) {
360 delta += runtime->buffer_size;
361 hw_base += runtime->buffer_size;
362 if (hw_base >= runtime->boundary) {
366 new_hw_ptr = hw_base + pos;
367 hdelta -= runtime->hw_ptr_buffer_jiffies;
372 /* something must be really wrong */
373 if (delta >= runtime->buffer_size + runtime->period_size) {
374 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
375 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
376 substream->stream, (long)pos,
377 (long)new_hw_ptr, (long)old_hw_ptr);
381 /* Do jiffies check only in xrun_debug mode */
382 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
383 goto no_jiffies_check;
	/* Skip the jiffies check for hardware with the BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
389 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
390 goto no_jiffies_check;
392 if (hdelta < runtime->delay)
393 goto no_jiffies_check;
394 hdelta -= runtime->delay;
395 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
396 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
398 (((runtime->period_size * HZ) / runtime->rate)
400 /* move new_hw_ptr according jiffies not pos variable */
401 new_hw_ptr = old_hw_ptr;
403 /* use loop to avoid checks for delta overflows */
404 /* the delta value is small or zero in most cases */
406 new_hw_ptr += runtime->period_size;
407 if (new_hw_ptr >= runtime->boundary) {
408 new_hw_ptr -= runtime->boundary;
413 /* align hw_base to buffer_size */
414 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
415 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
416 (long)pos, (long)hdelta,
417 (long)runtime->period_size, jdelta,
418 ((hdelta * HZ) / runtime->rate), hw_base,
419 (unsigned long)old_hw_ptr,
420 (unsigned long)new_hw_ptr);
421 /* reset values to proper state */
423 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
426 if (delta > runtime->period_size + runtime->period_size / 2) {
427 hw_ptr_error(substream, in_interrupt,
429 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
430 substream->stream, (long)delta,
436 if (runtime->status->hw_ptr == new_hw_ptr) {
437 runtime->hw_ptr_jiffies = curr_jiffies;
438 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
442 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
443 runtime->silence_size > 0)
444 snd_pcm_playback_silence(substream, new_hw_ptr);
447 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
449 delta += runtime->boundary;
450 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
451 runtime->hw_ptr_interrupt += delta;
452 if (runtime->hw_ptr_interrupt >= runtime->boundary)
453 runtime->hw_ptr_interrupt -= runtime->boundary;
455 runtime->hw_ptr_base = hw_base;
456 runtime->status->hw_ptr = new_hw_ptr;
457 runtime->hw_ptr_jiffies = curr_jiffies;
458 if (crossed_boundary) {
459 snd_BUG_ON(crossed_boundary != 1);
460 runtime->hw_ptr_wrap += runtime->boundary;
463 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
465 return snd_pcm_update_state(substream, runtime);
468 /* CAUTION: call it with irq disabled */
469 int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
471 return snd_pcm_update_hw_ptr0(substream, 0);
475 * snd_pcm_set_ops - set the PCM operators
476 * @pcm: the pcm instance
477 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
478 * @ops: the operator table
480 * Sets the given PCM operators to the pcm instance.
482 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
483 const struct snd_pcm_ops *ops)
485 struct snd_pcm_str *stream = &pcm->streams[direction];
486 struct snd_pcm_substream *substream;
488 for (substream = stream->substream; substream != NULL; substream = substream->next)
489 substream->ops = ops;
EXPORT_SYMBOL(snd_pcm_set_ops);
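
/*
 * Example (illustrative sketch, compiled out, not part of this file): how a
 * driver typically attaches its operator tables right after creating the PCM
 * instance. The "example_*" names below are hypothetical.
 */
#if 0
static int example_new_pcm(struct snd_card *card, void *chip,
			   const struct snd_pcm_ops *playback_ops,
			   const struct snd_pcm_ops *capture_ops)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(card, "Example", 0, 1, 1, &pcm);
	if (err < 0)
		return err;
	pcm->private_data = chip;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, capture_ops);
	return 0;
}
#endif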
494 * snd_pcm_set_sync - set the PCM sync id
495 * @substream: the pcm substream
497 * Sets the PCM sync identifier for the card.
499 void snd_pcm_set_sync(struct snd_pcm_substream *substream)
501 struct snd_pcm_runtime *runtime = substream->runtime;
503 runtime->sync.id32[0] = substream->pcm->card->number;
504 runtime->sync.id32[1] = -1;
505 runtime->sync.id32[2] = -1;
506 runtime->sync.id32[3] = -1;
508 EXPORT_SYMBOL(snd_pcm_set_sync);
511 * Standard ioctl routine
514 static inline unsigned int div32(unsigned int a, unsigned int b,
525 static inline unsigned int div_down(unsigned int a, unsigned int b)
532 static inline unsigned int div_up(unsigned int a, unsigned int b)
544 static inline unsigned int mul(unsigned int a, unsigned int b)
548 if (div_down(UINT_MAX, a) < b)
553 static inline unsigned int muldiv32(unsigned int a, unsigned int b,
554 unsigned int c, unsigned int *r)
556 u_int64_t n = (u_int64_t) a * b;
561 n = div_u64_rem(n, c, r);
570 * snd_interval_refine - refine the interval value of configurator
571 * @i: the interval value to refine
572 * @v: the interval value to refer to
574 * Refines the interval value with the reference value.
575 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) is evaluated.
578 * Return: Positive if the value is changed, zero if it's not changed, or a
579 * negative error code.
581 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
584 if (snd_BUG_ON(snd_interval_empty(i)))
586 if (i->min < v->min) {
588 i->openmin = v->openmin;
590 } else if (i->min == v->min && !i->openmin && v->openmin) {
594 if (i->max > v->max) {
596 i->openmax = v->openmax;
598 } else if (i->max == v->max && !i->openmax && v->openmax) {
602 if (!i->integer && v->integer) {
615 } else if (!i->openmin && !i->openmax && i->min == i->max)
617 if (snd_interval_checkempty(i)) {
618 snd_interval_none(i);
623 EXPORT_SYMBOL(snd_interval_refine);
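
/*
 * Example (illustrative sketch, compiled out): refining one interval with
 * another keeps only the intersection of the two ranges. The function below
 * is hypothetical and only demonstrates the semantics.
 */
#if 0
static void example_refine_rate_interval(void)
{
	struct snd_interval hw, app;

	snd_interval_any(&hw);		/* start from the full range */
	hw.min = 8000;
	hw.max = 96000;

	snd_interval_any(&app);
	app.min = 44100;
	app.max = 192000;

	/* after this call, hw describes [44100, 96000] */
	snd_interval_refine(&hw, &app);
}
#endif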
625 static int snd_interval_refine_first(struct snd_interval *i)
627 const unsigned int last_max = i->max;
629 if (snd_BUG_ON(snd_interval_empty(i)))
631 if (snd_interval_single(i))
636 /* only exclude max value if also excluded before refine */
637 i->openmax = (i->openmax && i->max >= last_max);
641 static int snd_interval_refine_last(struct snd_interval *i)
643 const unsigned int last_min = i->min;
645 if (snd_BUG_ON(snd_interval_empty(i)))
647 if (snd_interval_single(i))
652 /* only exclude min value if also excluded before refine */
653 i->openmin = (i->openmin && i->min <= last_min);
657 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
659 if (a->empty || b->empty) {
660 snd_interval_none(c);
664 c->min = mul(a->min, b->min);
665 c->openmin = (a->openmin || b->openmin);
666 c->max = mul(a->max, b->max);
667 c->openmax = (a->openmax || b->openmax);
668 c->integer = (a->integer && b->integer);
672 * snd_interval_div - refine the interval value with division
679 * Returns non-zero if the value is changed, zero if not changed.
681 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
684 if (a->empty || b->empty) {
685 snd_interval_none(c);
689 c->min = div32(a->min, b->max, &r);
690 c->openmin = (r || a->openmin || b->openmax);
692 c->max = div32(a->max, b->min, &r);
697 c->openmax = (a->openmax || b->openmin);
706 * snd_interval_muldivk - refine the interval value
709 * @k: divisor (as integer)
714 * Returns non-zero if the value is changed, zero if not changed.
716 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
717 unsigned int k, struct snd_interval *c)
720 if (a->empty || b->empty) {
721 snd_interval_none(c);
725 c->min = muldiv32(a->min, b->min, k, &r);
726 c->openmin = (r || a->openmin || b->openmin);
727 c->max = muldiv32(a->max, b->max, k, &r);
732 c->openmax = (a->openmax || b->openmax);
737 * snd_interval_mulkdiv - refine the interval value
739 * @k: dividend 2 (as integer)
745 * Returns non-zero if the value is changed, zero if not changed.
747 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
748 const struct snd_interval *b, struct snd_interval *c)
751 if (a->empty || b->empty) {
752 snd_interval_none(c);
756 c->min = muldiv32(a->min, k, b->max, &r);
757 c->openmin = (r || a->openmin || b->openmax);
759 c->max = muldiv32(a->max, k, b->min, &r);
764 c->openmax = (a->openmax || b->openmin);
776 * snd_interval_ratnum - refine the interval value
777 * @i: interval to refine
778 * @rats_count: number of ratnum_t
779 * @rats: ratnum_t array
780 * @nump: pointer to store the resultant numerator
781 * @denp: pointer to store the resultant denominator
783 * Return: Positive if the value is changed, zero if it's not changed, or a
784 * negative error code.
786 int snd_interval_ratnum(struct snd_interval *i,
787 unsigned int rats_count, const struct snd_ratnum *rats,
788 unsigned int *nump, unsigned int *denp)
790 unsigned int best_num, best_den;
793 struct snd_interval t;
795 unsigned int result_num, result_den;
798 best_num = best_den = best_diff = 0;
799 for (k = 0; k < rats_count; ++k) {
800 unsigned int num = rats[k].num;
802 unsigned int q = i->min;
806 den = div_up(num, q);
807 if (den < rats[k].den_min)
809 if (den > rats[k].den_max)
810 den = rats[k].den_max;
813 r = (den - rats[k].den_min) % rats[k].den_step;
817 diff = num - q * den;
821 diff * best_den < best_diff * den) {
831 t.min = div_down(best_num, best_den);
832 t.openmin = !!(best_num % best_den);
834 result_num = best_num;
835 result_diff = best_diff;
836 result_den = best_den;
837 best_num = best_den = best_diff = 0;
838 for (k = 0; k < rats_count; ++k) {
839 unsigned int num = rats[k].num;
841 unsigned int q = i->max;
847 den = div_down(num, q);
848 if (den > rats[k].den_max)
850 if (den < rats[k].den_min)
851 den = rats[k].den_min;
854 r = (den - rats[k].den_min) % rats[k].den_step;
856 den += rats[k].den_step - r;
858 diff = q * den - num;
862 diff * best_den < best_diff * den) {
872 t.max = div_up(best_num, best_den);
873 t.openmax = !!(best_num % best_den);
875 err = snd_interval_refine(i, &t);
879 if (snd_interval_single(i)) {
880 if (best_diff * result_den < result_diff * best_den) {
881 result_num = best_num;
882 result_den = best_den;
891 EXPORT_SYMBOL(snd_interval_ratnum);
894 * snd_interval_ratden - refine the interval value
895 * @i: interval to refine
896 * @rats_count: number of struct ratden
897 * @rats: struct ratden array
898 * @nump: pointer to store the resultant numerator
899 * @denp: pointer to store the resultant denominator
901 * Return: Positive if the value is changed, zero if it's not changed, or a
902 * negative error code.
904 static int snd_interval_ratden(struct snd_interval *i,
905 unsigned int rats_count,
906 const struct snd_ratden *rats,
907 unsigned int *nump, unsigned int *denp)
909 unsigned int best_num, best_diff, best_den;
911 struct snd_interval t;
914 best_num = best_den = best_diff = 0;
915 for (k = 0; k < rats_count; ++k) {
917 unsigned int den = rats[k].den;
918 unsigned int q = i->min;
921 if (num > rats[k].num_max)
923 if (num < rats[k].num_min)
924 num = rats[k].num_max;
927 r = (num - rats[k].num_min) % rats[k].num_step;
929 num += rats[k].num_step - r;
931 diff = num - q * den;
933 diff * best_den < best_diff * den) {
943 t.min = div_down(best_num, best_den);
944 t.openmin = !!(best_num % best_den);
946 best_num = best_den = best_diff = 0;
947 for (k = 0; k < rats_count; ++k) {
949 unsigned int den = rats[k].den;
950 unsigned int q = i->max;
953 if (num < rats[k].num_min)
955 if (num > rats[k].num_max)
956 num = rats[k].num_max;
959 r = (num - rats[k].num_min) % rats[k].num_step;
963 diff = q * den - num;
965 diff * best_den < best_diff * den) {
975 t.max = div_up(best_num, best_den);
976 t.openmax = !!(best_num % best_den);
978 err = snd_interval_refine(i, &t);
982 if (snd_interval_single(i)) {
992 * snd_interval_list - refine the interval value from the list
993 * @i: the interval value to refine
994 * @count: the number of elements in the list
995 * @list: the value list
996 * @mask: the bit-mask to evaluate
998 * Refines the interval value from the list.
999 * When mask is non-zero, only the elements corresponding to bit 1 are
1002 * Return: Positive if the value is changed, zero if it's not changed, or a
1003 * negative error code.
1005 int snd_interval_list(struct snd_interval *i, unsigned int count,
1006 const unsigned int *list, unsigned int mask)
1009 struct snd_interval list_range;
1015 snd_interval_any(&list_range);
1016 list_range.min = UINT_MAX;
1018 for (k = 0; k < count; k++) {
1019 if (mask && !(mask & (1 << k)))
1021 if (!snd_interval_test(i, list[k]))
1023 list_range.min = min(list_range.min, list[k]);
1024 list_range.max = max(list_range.max, list[k]);
1026 return snd_interval_refine(i, &list_range);
1028 EXPORT_SYMBOL(snd_interval_list);
1031 * snd_interval_ranges - refine the interval value from the list of ranges
1032 * @i: the interval value to refine
1033 * @count: the number of elements in the list of ranges
1034 * @ranges: the ranges list
1035 * @mask: the bit-mask to evaluate
1037 * Refines the interval value from the list of ranges.
1038 * When mask is non-zero, only the elements corresponding to bit 1 are
1041 * Return: Positive if the value is changed, zero if it's not changed, or a
1042 * negative error code.
1044 int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1045 const struct snd_interval *ranges, unsigned int mask)
1048 struct snd_interval range_union;
1049 struct snd_interval range;
1052 snd_interval_none(i);
1055 snd_interval_any(&range_union);
1056 range_union.min = UINT_MAX;
1057 range_union.max = 0;
1058 for (k = 0; k < count; k++) {
1059 if (mask && !(mask & (1 << k)))
1061 snd_interval_copy(&range, &ranges[k]);
1062 if (snd_interval_refine(&range, i) < 0)
1064 if (snd_interval_empty(&range))
1067 if (range.min < range_union.min) {
1068 range_union.min = range.min;
1069 range_union.openmin = 1;
1071 if (range.min == range_union.min && !range.openmin)
1072 range_union.openmin = 0;
1073 if (range.max > range_union.max) {
1074 range_union.max = range.max;
1075 range_union.openmax = 1;
1077 if (range.max == range_union.max && !range.openmax)
1078 range_union.openmax = 0;
1080 return snd_interval_refine(i, &range_union);
1082 EXPORT_SYMBOL(snd_interval_ranges);
1084 static int snd_interval_step(struct snd_interval *i, unsigned int step)
1089 if (n != 0 || i->openmin) {
1095 if (n != 0 || i->openmax) {
1100 if (snd_interval_checkempty(i)) {
1107 /* Info constraints helpers */
1110 * snd_pcm_hw_rule_add - add the hw-constraint rule
1111 * @runtime: the pcm runtime instance
1112 * @cond: condition bits
1113 * @var: the variable to evaluate
1114 * @func: the evaluation function
1115 * @private: the private data pointer passed to function
1116 * @dep: the dependent variables
1118 * Return: Zero if successful, or a negative error code on failure.
1120 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1122 snd_pcm_hw_rule_func_t func, void *private,
1125 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1126 struct snd_pcm_hw_rule *c;
1129 va_start(args, dep);
1130 if (constrs->rules_num >= constrs->rules_all) {
1131 struct snd_pcm_hw_rule *new;
1132 unsigned int new_rules = constrs->rules_all + 16;
1133 new = krealloc_array(constrs->rules, new_rules,
1134 sizeof(*c), GFP_KERNEL);
1139 constrs->rules = new;
1140 constrs->rules_all = new_rules;
1142 c = &constrs->rules[constrs->rules_num];
1146 c->private = private;
1149 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1156 dep = va_arg(args, int);
1158 constrs->rules_num++;
1162 EXPORT_SYMBOL(snd_pcm_hw_rule_add);
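
/*
 * Example (illustrative sketch, compiled out): a custom rule that couples the
 * channel count to the rate, registered from the driver's .open callback.
 * The hardware limitation and all "example_*" names are hypothetical.
 */
#if 0
static int example_rule_channels_by_rate(struct snd_pcm_hw_params *params,
					 struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	const struct snd_interval *r = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = { .min = 1, .max = 8, .integer = 1 };

	/* hypothetical limitation: only stereo above 48 kHz */
	if (r->min > 48000)
		t.max = 2;
	return snd_interval_refine(c, &t);
}

static int example_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_rule_add(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_CHANNELS,
				   example_rule_channels_by_rate, NULL,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
#endif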
1165 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1166 * @runtime: PCM runtime instance
1167 * @var: hw_params variable to apply the mask
1168 * @mask: the bitmap mask
1170 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1172 * Return: Zero if successful, or a negative error code on failure.
1174 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1177 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1178 struct snd_mask *maskp = constrs_mask(constrs, var);
1179 *maskp->bits &= mask;
1180 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1181 if (*maskp->bits == 0)
1187 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1188 * @runtime: PCM runtime instance
1189 * @var: hw_params variable to apply the mask
1190 * @mask: the 64bit bitmap mask
1192 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1194 * Return: Zero if successful, or a negative error code on failure.
1196 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1199 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1200 struct snd_mask *maskp = constrs_mask(constrs, var);
1201 maskp->bits[0] &= (u_int32_t)mask;
1202 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1203 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1204 if (! maskp->bits[0] && ! maskp->bits[1])
1208 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1211 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1212 * @runtime: PCM runtime instance
1213 * @var: hw_params variable to apply the integer constraint
1215 * Apply the constraint of integer to an interval parameter.
1217 * Return: Positive if the value is changed, zero if it's not changed, or a
1218 * negative error code.
1220 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1222 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1223 return snd_interval_setinteger(constrs_interval(constrs, var));
1225 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
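
/*
 * Example (illustrative sketch, compiled out): many DMA engines can only deal
 * with a whole number of periods per buffer; drivers commonly enforce that
 * from .open as shown below ("example_open" is hypothetical).
 */
#if 0
static int example_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_integer(substream->runtime,
					     SNDRV_PCM_HW_PARAM_PERIODS);
}
#endif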
1228 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1229 * @runtime: PCM runtime instance
1230 * @var: hw_params variable to apply the range
1231 * @min: the minimal value
1232 * @max: the maximal value
1234 * Apply the min/max range constraint to an interval parameter.
1236 * Return: Positive if the value is changed, zero if it's not changed, or a
1237 * negative error code.
1239 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1240 unsigned int min, unsigned int max)
1242 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1243 struct snd_interval t;
1246 t.openmin = t.openmax = 0;
1248 return snd_interval_refine(constrs_interval(constrs, var), &t);
1250 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
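
/*
 * Example (illustrative sketch, compiled out): limiting the period size to a
 * hypothetical FIFO-friendly range of 64..4096 bytes.
 */
#if 0
static int example_limit_period_bytes(struct snd_pcm_runtime *runtime)
{
	return snd_pcm_hw_constraint_minmax(runtime,
					    SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					    64, 4096);
}
#endif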
1252 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1253 struct snd_pcm_hw_rule *rule)
1255 struct snd_pcm_hw_constraint_list *list = rule->private;
1256 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1261 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1262 * @runtime: PCM runtime instance
1263 * @cond: condition bits
1264 * @var: hw_params variable to apply the list constraint
1267 * Apply the list of constraints to an interval parameter.
1269 * Return: Zero if successful, or a negative error code on failure.
1271 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1273 snd_pcm_hw_param_t var,
1274 const struct snd_pcm_hw_constraint_list *l)
1276 return snd_pcm_hw_rule_add(runtime, cond, var,
1277 snd_pcm_hw_rule_list, (void *)l,
1280 EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
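
/*
 * Example (illustrative sketch, compiled out): restricting the rate to a
 * fixed set of values supported by a hypothetical codec; the "example_*"
 * names are not part of the ALSA core API.
 */
#if 0
static const unsigned int example_rates[] = { 8000, 16000, 32000, 44100, 48000 };

static const struct snd_pcm_hw_constraint_list example_rate_list = {
	.count = ARRAY_SIZE(example_rates),
	.list = example_rates,
	.mask = 0,
};

static int example_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_list(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &example_rate_list);
}
#endif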
1282 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1283 struct snd_pcm_hw_rule *rule)
1285 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1286 return snd_interval_ranges(hw_param_interval(params, rule->var),
1287 r->count, r->ranges, r->mask);
1292 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1293 * @runtime: PCM runtime instance
1294 * @cond: condition bits
1295 * @var: hw_params variable to apply the list of range constraints
1298 * Apply the list of range constraints to an interval parameter.
1300 * Return: Zero if successful, or a negative error code on failure.
1302 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1304 snd_pcm_hw_param_t var,
1305 const struct snd_pcm_hw_constraint_ranges *r)
1307 return snd_pcm_hw_rule_add(runtime, cond, var,
1308 snd_pcm_hw_rule_ranges, (void *)r,
1311 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1313 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1314 struct snd_pcm_hw_rule *rule)
1316 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1317 unsigned int num = 0, den = 0;
1319 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1320 r->nrats, r->rats, &num, &den);
1321 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1322 params->rate_num = num;
1323 params->rate_den = den;
1329 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1330 * @runtime: PCM runtime instance
1331 * @cond: condition bits
1332 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constraints
1335 * Return: Zero if successful, or a negative error code on failure.
1337 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1339 snd_pcm_hw_param_t var,
1340 const struct snd_pcm_hw_constraint_ratnums *r)
1342 return snd_pcm_hw_rule_add(runtime, cond, var,
1343 snd_pcm_hw_rule_ratnums, (void *)r,
1346 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1348 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1349 struct snd_pcm_hw_rule *rule)
1351 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1352 unsigned int num = 0, den = 0;
1353 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1354 r->nrats, r->rats, &num, &den);
1355 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1356 params->rate_num = num;
1357 params->rate_den = den;
1363 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1364 * @runtime: PCM runtime instance
1365 * @cond: condition bits
1366 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
1369 * Return: Zero if successful, or a negative error code on failure.
1371 int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1373 snd_pcm_hw_param_t var,
1374 const struct snd_pcm_hw_constraint_ratdens *r)
1376 return snd_pcm_hw_rule_add(runtime, cond, var,
1377 snd_pcm_hw_rule_ratdens, (void *)r,
1380 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1382 static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1383 struct snd_pcm_hw_rule *rule)
1385 unsigned int l = (unsigned long) rule->private;
1386 int width = l & 0xffff;
1387 unsigned int msbits = l >> 16;
1388 const struct snd_interval *i =
1389 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1391 if (!snd_interval_single(i))
1394 if ((snd_interval_value(i) == width) ||
1395 (width == 0 && snd_interval_value(i) > msbits))
1396 params->msbits = min_not_zero(params->msbits, msbits);
1402 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1403 * @runtime: PCM runtime instance
1404 * @cond: condition bits
1405 * @width: sample bits width
1406 * @msbits: msbits width
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been selected. If width is set to 0
 * the msbits will be set for any sample format with a width larger than the
 * specified msbits.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
1415 int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1418 unsigned int msbits)
1420 unsigned long l = (msbits << 16) | width;
1421 return snd_pcm_hw_rule_add(runtime, cond, -1,
1422 snd_pcm_hw_rule_msbits,
1424 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1426 EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
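
/*
 * Example (illustrative sketch, compiled out): a hypothetical converter that
 * stores samples in 32-bit containers but only resolves 24 bits.
 */
#if 0
static int example_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
}
#endif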
1428 static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1429 struct snd_pcm_hw_rule *rule)
1431 unsigned long step = (unsigned long) rule->private;
1432 return snd_interval_step(hw_param_interval(params, rule->var), step);
1436 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1437 * @runtime: PCM runtime instance
1438 * @cond: condition bits
1439 * @var: hw_params variable to apply the step constraint
1442 * Return: Zero if successful, or a negative error code on failure.
1444 int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1446 snd_pcm_hw_param_t var,
1449 return snd_pcm_hw_rule_add(runtime, cond, var,
1450 snd_pcm_hw_rule_step, (void *) step,
1453 EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1455 static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1457 static const unsigned int pow2_sizes[] = {
1458 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1459 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1460 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1461 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1463 return snd_interval_list(hw_param_interval(params, rule->var),
1464 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1468 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1469 * @runtime: PCM runtime instance
1470 * @cond: condition bits
1471 * @var: hw_params variable to apply the power-of-2 constraint
1473 * Return: Zero if successful, or a negative error code on failure.
1475 int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1477 snd_pcm_hw_param_t var)
1479 return snd_pcm_hw_rule_add(runtime, cond, var,
1480 snd_pcm_hw_rule_pow2, NULL,
1483 EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1485 static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1486 struct snd_pcm_hw_rule *rule)
1488 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1489 struct snd_interval *rate;
1491 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1492 return snd_interval_list(rate, 1, &base_rate, 0);
1496 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1497 * @runtime: PCM runtime instance
1498 * @base_rate: the rate at which the hardware does not resample
1500 * Return: Zero if successful, or a negative error code on failure.
1502 int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1503 unsigned int base_rate)
1505 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1506 SNDRV_PCM_HW_PARAM_RATE,
1507 snd_pcm_hw_rule_noresample_func,
1508 (void *)(uintptr_t)base_rate,
1509 SNDRV_PCM_HW_PARAM_RATE, -1);
1511 EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
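
/*
 * Example (illustrative sketch, compiled out): a hypothetical codec whose
 * master clock is derived from 48 kHz; when the application requests
 * "no resampling", only the native rate remains selectable.
 */
#if 0
static int example_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_rule_noresample(substream->runtime, 48000);
}
#endif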
1513 static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1514 snd_pcm_hw_param_t var)
1516 if (hw_is_mask(var)) {
1517 snd_mask_any(hw_param_mask(params, var));
1518 params->cmask |= 1 << var;
1519 params->rmask |= 1 << var;
1522 if (hw_is_interval(var)) {
1523 snd_interval_any(hw_param_interval(params, var));
1524 params->cmask |= 1 << var;
1525 params->rmask |= 1 << var;
1531 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1534 memset(params, 0, sizeof(*params));
1535 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1536 _snd_pcm_hw_param_any(params, k);
1537 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1538 _snd_pcm_hw_param_any(params, k);
1541 EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1544 * snd_pcm_hw_param_value - return @params field @var value
1545 * @params: the hw_params instance
1546 * @var: parameter to retrieve
1547 * @dir: pointer to the direction (-1,0,1) or %NULL
1549 * Return: The value for field @var if it's fixed in configuration space
1550 * defined by @params. -%EINVAL otherwise.
1552 int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1553 snd_pcm_hw_param_t var, int *dir)
1555 if (hw_is_mask(var)) {
1556 const struct snd_mask *mask = hw_param_mask_c(params, var);
1557 if (!snd_mask_single(mask))
1561 return snd_mask_value(mask);
1563 if (hw_is_interval(var)) {
1564 const struct snd_interval *i = hw_param_interval_c(params, var);
1565 if (!snd_interval_single(i))
1569 return snd_interval_value(i);
1573 EXPORT_SYMBOL(snd_pcm_hw_param_value);
1575 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1576 snd_pcm_hw_param_t var)
1578 if (hw_is_mask(var)) {
1579 snd_mask_none(hw_param_mask(params, var));
1580 params->cmask |= 1 << var;
1581 params->rmask |= 1 << var;
1582 } else if (hw_is_interval(var)) {
1583 snd_interval_none(hw_param_interval(params, var));
1584 params->cmask |= 1 << var;
1585 params->rmask |= 1 << var;
1590 EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1592 static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1593 snd_pcm_hw_param_t var)
1596 if (hw_is_mask(var))
1597 changed = snd_mask_refine_first(hw_param_mask(params, var));
1598 else if (hw_is_interval(var))
1599 changed = snd_interval_refine_first(hw_param_interval(params, var));
1603 params->cmask |= 1 << var;
1604 params->rmask |= 1 << var;
1611 * snd_pcm_hw_param_first - refine config space and return minimum value
1612 * @pcm: PCM instance
1613 * @params: the hw_params instance
1614 * @var: parameter to retrieve
1615 * @dir: pointer to the direction (-1,0,1) or %NULL
1617 * Inside configuration space defined by @params remove from @var all
1618 * values > minimum. Reduce configuration space accordingly.
1620 * Return: The minimum, or a negative error code on failure.
1622 int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1623 struct snd_pcm_hw_params *params,
1624 snd_pcm_hw_param_t var, int *dir)
1626 int changed = _snd_pcm_hw_param_first(params, var);
1629 if (params->rmask) {
1630 int err = snd_pcm_hw_refine(pcm, params);
1634 return snd_pcm_hw_param_value(params, var, dir);
1636 EXPORT_SYMBOL(snd_pcm_hw_param_first);
1638 static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1639 snd_pcm_hw_param_t var)
1642 if (hw_is_mask(var))
1643 changed = snd_mask_refine_last(hw_param_mask(params, var));
1644 else if (hw_is_interval(var))
1645 changed = snd_interval_refine_last(hw_param_interval(params, var));
1649 params->cmask |= 1 << var;
1650 params->rmask |= 1 << var;
1657 * snd_pcm_hw_param_last - refine config space and return maximum value
1658 * @pcm: PCM instance
1659 * @params: the hw_params instance
1660 * @var: parameter to retrieve
1661 * @dir: pointer to the direction (-1,0,1) or %NULL
1663 * Inside configuration space defined by @params remove from @var all
1664 * values < maximum. Reduce configuration space accordingly.
1666 * Return: The maximum, or a negative error code on failure.
1668 int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1669 struct snd_pcm_hw_params *params,
1670 snd_pcm_hw_param_t var, int *dir)
1672 int changed = _snd_pcm_hw_param_last(params, var);
1675 if (params->rmask) {
1676 int err = snd_pcm_hw_refine(pcm, params);
1680 return snd_pcm_hw_param_value(params, var, dir);
1682 EXPORT_SYMBOL(snd_pcm_hw_param_last);
1684 static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1687 struct snd_pcm_runtime *runtime = substream->runtime;
1688 unsigned long flags;
1689 snd_pcm_stream_lock_irqsave(substream, flags);
1690 if (snd_pcm_running(substream) &&
1691 snd_pcm_update_hw_ptr(substream) >= 0)
1692 runtime->status->hw_ptr %= runtime->buffer_size;
1694 runtime->status->hw_ptr = 0;
1695 runtime->hw_ptr_wrap = 0;
1697 snd_pcm_stream_unlock_irqrestore(substream, flags);
1701 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1704 struct snd_pcm_channel_info *info = arg;
1705 struct snd_pcm_runtime *runtime = substream->runtime;
1707 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1711 width = snd_pcm_format_physical_width(runtime->format);
1715 switch (runtime->access) {
1716 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1717 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1718 info->first = info->channel * width;
1719 info->step = runtime->channels * width;
1721 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1722 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1724 size_t size = runtime->dma_bytes / runtime->channels;
1725 info->first = info->channel * size * 8;
1736 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1739 struct snd_pcm_hw_params *params = arg;
1740 snd_pcm_format_t format;
1744 params->fifo_size = substream->runtime->hw.fifo_size;
1745 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1746 format = params_format(params);
1747 channels = params_channels(params);
1748 frame_size = snd_pcm_format_size(format, channels);
1750 params->fifo_size /= frame_size;
1756 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1757 * @substream: the pcm substream instance
1758 * @cmd: ioctl command
1759 * @arg: ioctl argument
1761 * Processes the generic ioctl commands for PCM.
1762 * Can be passed as the ioctl callback for PCM ops.
1764 * Return: Zero if successful, or a negative error code on failure.
1766 int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1767 unsigned int cmd, void *arg)
1770 case SNDRV_PCM_IOCTL1_RESET:
1771 return snd_pcm_lib_ioctl_reset(substream, arg);
1772 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1773 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1774 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1775 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1779 EXPORT_SYMBOL(snd_pcm_lib_ioctl);
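
/*
 * Example (illustrative sketch, compiled out): a driver can simply plug this
 * generic handler into its operator table; the other "example_*" callbacks
 * are hypothetical, and current kernels also fall back to this handler when
 * .ioctl is left NULL.
 */
#if 0
static const struct snd_pcm_ops example_playback_ops = {
	.open		= example_open,
	.close		= example_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= example_hw_params,
	.hw_free	= example_hw_free,
	.prepare	= example_prepare,
	.trigger	= example_trigger,
	.pointer	= example_pointer,
};
#endif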
/**
 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
 *						under acquired lock of PCM substream.
 * @substream: the instance of pcm substream.
 *
 * This function is called when a batch of audio data frames of the same size as the period of
 * the buffer has been processed in the audio data transmission.
 *
 * The call updates the runtime status with the latest position of the audio data transmission,
 * checks for overruns and underruns of the buffer, wakes up user processes waiting for available
 * audio data frames, samples the audio timestamp, and stops or drains the PCM substream according
 * to the configured thresholds.
 *
 * The function is intended for the case that the PCM driver operates on audio data frames under
 * an acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops in
 * process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
 * instead, since the lock of the PCM substream should be acquired in advance.
 *
 * Developers should keep in mind that some callbacks in &snd_pcm_ops are invoked by this
 * function:
 *
 * - .pointer - to retrieve the current position of audio data transmission by frame count or XRUN state.
 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
 * - .get_time_info - to retrieve the audio timestamp if needed.
 *
 * Even if more than one period has elapsed since the last call, you have to call this only once.
 */
1808 void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1810 struct snd_pcm_runtime *runtime;
1812 if (PCM_RUNTIME_CHECK(substream))
1814 runtime = substream->runtime;
1816 if (!snd_pcm_running(substream) ||
1817 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1820 #ifdef CONFIG_SND_PCM_TIMER
1821 if (substream->timer_running)
1822 snd_timer_interrupt(substream->timer, 1);
1825 snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1827 EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
/**
 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring the
 *			      lock of the PCM substream.
 * @substream: the instance of PCM substream.
 *
 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
 * it acquires the lock of the PCM substream by itself.
 *
 * It's typically called from an IRQ handler, when a hardware IRQ occurs, to notify that a batch
 * of audio data frames of the same size as the period of the buffer has been processed in the
 * audio data transmission.
 */
1841 void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1843 unsigned long flags;
1845 if (snd_BUG_ON(!substream))
1848 snd_pcm_stream_lock_irqsave(substream, flags);
1849 snd_pcm_period_elapsed_under_stream_lock(substream);
1850 snd_pcm_stream_unlock_irqrestore(substream, flags);
1852 EXPORT_SYMBOL(snd_pcm_period_elapsed);
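
/*
 * Example (illustrative sketch, compiled out): the typical call site is a
 * hardware interrupt handler, after the controller has signalled that one
 * period was transferred. The chip structure and the "example_*" helpers are
 * hypothetical.
 */
#if 0
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_chip *chip = dev_id;

	if (!example_irq_pending(chip))
		return IRQ_NONE;
	example_ack_irq(chip);
	snd_pcm_period_elapsed(chip->substream);
	return IRQ_HANDLED;
}
#endif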
/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during operation.
 * The available space is stored in availp. When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 */
1860 static int wait_for_avail(struct snd_pcm_substream *substream,
1861 snd_pcm_uframes_t *availp)
1863 struct snd_pcm_runtime *runtime = substream->runtime;
1864 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1865 wait_queue_entry_t wait;
1867 snd_pcm_uframes_t avail = 0;
1868 long wait_time, tout;
1870 init_waitqueue_entry(&wait, current);
1871 set_current_state(TASK_INTERRUPTIBLE);
1872 add_wait_queue(&runtime->tsleep, &wait);
1874 if (runtime->no_period_wakeup)
1875 wait_time = MAX_SCHEDULE_TIMEOUT;
1877 /* use wait time from substream if available */
1878 if (substream->wait_time) {
1879 wait_time = substream->wait_time;
1883 if (runtime->rate) {
1884 long t = runtime->period_size * 2 /
1886 wait_time = max(t, wait_time);
1888 wait_time = msecs_to_jiffies(wait_time * 1000);
1893 if (signal_pending(current)) {
1899 * We need to check if space became available already
1900 * (and thus the wakeup happened already) first to close
1901 * the race of space already having become available.
1902 * This check must happen after been added to the waitqueue
1903 * and having current state be INTERRUPTIBLE.
1905 avail = snd_pcm_avail(substream);
1906 if (avail >= runtime->twake)
1908 snd_pcm_stream_unlock_irq(substream);
1910 tout = schedule_timeout(wait_time);
1912 snd_pcm_stream_lock_irq(substream);
1913 set_current_state(TASK_INTERRUPTIBLE);
1914 switch (runtime->state) {
1915 case SNDRV_PCM_STATE_SUSPENDED:
1918 case SNDRV_PCM_STATE_XRUN:
1921 case SNDRV_PCM_STATE_DRAINING:
1925 avail = 0; /* indicate draining */
1927 case SNDRV_PCM_STATE_OPEN:
1928 case SNDRV_PCM_STATE_SETUP:
1929 case SNDRV_PCM_STATE_DISCONNECTED:
1932 case SNDRV_PCM_STATE_PAUSED:
1936 pcm_dbg(substream->pcm,
1937 "%s write error (DMA or IRQ trouble?)\n",
1938 is_playback ? "playback" : "capture");
1944 set_current_state(TASK_RUNNING);
1945 remove_wait_queue(&runtime->tsleep, &wait);
1950 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1951 int channel, unsigned long hwoff,
1952 void *buf, unsigned long bytes);
1954 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1955 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1957 /* calculate the target DMA-buffer position to be written/read */
1958 static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1959 int channel, unsigned long hwoff)
1961 return runtime->dma_area + hwoff +
1962 channel * (runtime->dma_bytes / runtime->channels);
/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1966 static int default_write_copy(struct snd_pcm_substream *substream,
1967 int channel, unsigned long hwoff,
1968 void *buf, unsigned long bytes)
1970 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1971 (void __user *)buf, bytes))
1976 /* default copy_kernel ops for write */
1977 static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1978 int channel, unsigned long hwoff,
1979 void *buf, unsigned long bytes)
1981 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1985 /* fill silence instead of copy data; called as a transfer helper
1986 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when
1987 * a NULL buffer is passed
1989 static int fill_silence(struct snd_pcm_substream *substream, int channel,
1990 unsigned long hwoff, void *buf, unsigned long bytes)
1992 struct snd_pcm_runtime *runtime = substream->runtime;
1994 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1996 if (substream->ops->fill_silence)
1997 return substream->ops->fill_silence(substream, channel,
2000 snd_pcm_format_set_silence(runtime->format,
2001 get_dma_ptr(runtime, channel, hwoff),
2002 bytes_to_samples(runtime, bytes));
/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
2007 static int default_read_copy(struct snd_pcm_substream *substream,
2008 int channel, unsigned long hwoff,
2009 void *buf, unsigned long bytes)
2011 if (copy_to_user((void __user *)buf,
2012 get_dma_ptr(substream->runtime, channel, hwoff),
2018 /* default copy_kernel ops for read */
2019 static int default_read_copy_kernel(struct snd_pcm_substream *substream,
2020 int channel, unsigned long hwoff,
2021 void *buf, unsigned long bytes)
2023 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
2027 /* call transfer function with the converted pointers and sizes;
2028 * for interleaved mode, it's one shot for all samples
2030 static int interleaved_copy(struct snd_pcm_substream *substream,
2031 snd_pcm_uframes_t hwoff, void *data,
2032 snd_pcm_uframes_t off,
2033 snd_pcm_uframes_t frames,
2034 pcm_transfer_f transfer)
2036 struct snd_pcm_runtime *runtime = substream->runtime;
2038 /* convert to bytes */
2039 hwoff = frames_to_bytes(runtime, hwoff);
2040 off = frames_to_bytes(runtime, off);
2041 frames = frames_to_bytes(runtime, frames);
2042 return transfer(substream, 0, hwoff, data + off, frames);
2045 /* call transfer function with the converted pointers and sizes for each
2046 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2048 static int noninterleaved_copy(struct snd_pcm_substream *substream,
2049 snd_pcm_uframes_t hwoff, void *data,
2050 snd_pcm_uframes_t off,
2051 snd_pcm_uframes_t frames,
2052 pcm_transfer_f transfer)
2054 struct snd_pcm_runtime *runtime = substream->runtime;
2055 int channels = runtime->channels;
2059 /* convert to bytes; note that it's not frames_to_bytes() here.
2060 * in non-interleaved mode, we copy for each channel, thus
2061 * each copy is n_samples bytes x channels = whole frames.
2063 off = samples_to_bytes(runtime, off);
2064 frames = samples_to_bytes(runtime, frames);
2065 hwoff = samples_to_bytes(runtime, hwoff);
2066 for (c = 0; c < channels; ++c, ++bufs) {
2067 if (!data || !*bufs)
2068 err = fill_silence(substream, c, hwoff, NULL, frames);
2070 err = transfer(substream, c, hwoff, *bufs + off,
2078 /* fill silence on the given buffer position;
2079 * called from snd_pcm_playback_silence()
2081 static int fill_silence_frames(struct snd_pcm_substream *substream,
2082 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2084 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2085 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2086 return interleaved_copy(substream, off, NULL, 0, frames,
2089 return noninterleaved_copy(substream, off, NULL, 0, frames,
2093 /* sanity-check for read/write methods */
2094 static int pcm_sanity_check(struct snd_pcm_substream *substream)
2096 struct snd_pcm_runtime *runtime;
2097 if (PCM_RUNTIME_CHECK(substream))
2099 runtime = substream->runtime;
2100 if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2102 if (runtime->state == SNDRV_PCM_STATE_OPEN)
2107 static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2109 switch (runtime->state) {
2110 case SNDRV_PCM_STATE_PREPARED:
2111 case SNDRV_PCM_STATE_RUNNING:
2112 case SNDRV_PCM_STATE_PAUSED:
2114 case SNDRV_PCM_STATE_XRUN:
2116 case SNDRV_PCM_STATE_SUSPENDED:
2123 /* update to the given appl_ptr and call ack callback if needed;
2124 * when an error is returned, take back to the original value
2126 int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2127 snd_pcm_uframes_t appl_ptr)
2129 struct snd_pcm_runtime *runtime = substream->runtime;
2130 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2131 snd_pcm_sframes_t diff;
2134 if (old_appl_ptr == appl_ptr)
2137 if (appl_ptr >= runtime->boundary)
2140 * check if a rewind is requested by the application
2142 if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2143 diff = appl_ptr - old_appl_ptr;
2145 if (diff > runtime->buffer_size)
2148 if (runtime->boundary + diff > runtime->buffer_size)
2153 runtime->control->appl_ptr = appl_ptr;
2154 if (substream->ops->ack) {
2155 ret = substream->ops->ack(substream);
2157 runtime->control->appl_ptr = old_appl_ptr;
2162 trace_applptr(substream, old_appl_ptr, appl_ptr);
2167 /* the common loop for read/write data */
2168 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2169 void *data, bool interleaved,
2170 snd_pcm_uframes_t size, bool in_kernel)
2172 struct snd_pcm_runtime *runtime = substream->runtime;
2173 snd_pcm_uframes_t xfer = 0;
2174 snd_pcm_uframes_t offset = 0;
2175 snd_pcm_uframes_t avail;
2177 pcm_transfer_f transfer;
2182 err = pcm_sanity_check(substream);
2186 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2188 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2189 runtime->channels > 1)
2191 writer = interleaved_copy;
2193 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2195 writer = noninterleaved_copy;
2200 transfer = fill_silence;
2203 } else if (in_kernel) {
2204 if (substream->ops->copy_kernel)
2205 transfer = substream->ops->copy_kernel;
2207 transfer = is_playback ?
2208 default_write_copy_kernel : default_read_copy_kernel;
2210 if (substream->ops->copy_user)
2211 transfer = (pcm_transfer_f)substream->ops->copy_user;
2213 transfer = is_playback ?
2214 default_write_copy : default_read_copy;
2220 nonblock = !!(substream->f_flags & O_NONBLOCK);
2222 snd_pcm_stream_lock_irq(substream);
2223 err = pcm_accessible_state(runtime);
2227 runtime->twake = runtime->control->avail_min ? : 1;
2228 if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2229 snd_pcm_update_hw_ptr(substream);
2232 * If size < start_threshold, wait indefinitely. Another
2233 * thread may start capture
2236 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2237 size >= runtime->start_threshold) {
2238 err = snd_pcm_start(substream);
2243 avail = snd_pcm_avail(substream);
2246 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2247 snd_pcm_uframes_t cont;
2250 runtime->state == SNDRV_PCM_STATE_DRAINING) {
2251 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2258 runtime->twake = min_t(snd_pcm_uframes_t, size,
2259 runtime->control->avail_min ? : 1);
2260 err = wait_for_avail(substream, &avail);
2264 continue; /* draining */
2266 frames = size > avail ? avail : size;
2267 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2268 appl_ofs = appl_ptr % runtime->buffer_size;
2269 cont = runtime->buffer_size - appl_ofs;
2272 if (snd_BUG_ON(!frames)) {
2276 if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2280 snd_pcm_stream_unlock_irq(substream);
2282 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2283 err = writer(substream, appl_ofs, data, offset, frames,
2286 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2287 snd_pcm_stream_lock_irq(substream);
2288 atomic_dec(&runtime->buffer_accessing);
2291 err = pcm_accessible_state(runtime);
2295 if (appl_ptr >= runtime->boundary)
2296 appl_ptr -= runtime->boundary;
2297 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2306 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2307 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2308 err = snd_pcm_start(substream);
2315 if (xfer > 0 && err >= 0)
2316 snd_pcm_update_state(substream, runtime);
2317 snd_pcm_stream_unlock_irq(substream);
2318 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2320 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2323 * standard channel mapping helpers
2326 /* default channel maps for multi-channel playbacks, up to 8 channels */
2327 const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2329 .map = { SNDRV_CHMAP_MONO } },
2331 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2333 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2334 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2336 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2337 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2338 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2340 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2341 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2342 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2343 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2346 EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2348 /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2349 const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2351 .map = { SNDRV_CHMAP_MONO } },
2353 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2355 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2356 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2358 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2359 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2360 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2362 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2363 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2364 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2365 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2368 EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2370 static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2372 if (ch > info->max_channels)
2374 return !info->channel_mask || (info->channel_mask & (1U << ch));
2377 static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2378 struct snd_ctl_elem_info *uinfo)
2380 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2382 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2383 uinfo->count = info->max_channels;
2384 uinfo->value.integer.min = 0;
2385 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2389 /* get callback for channel map ctl element
2390 * stores the channel position firstly matching with the current channels
2392 static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2393 struct snd_ctl_elem_value *ucontrol)
2395 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2396 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2397 struct snd_pcm_substream *substream;
2398 const struct snd_pcm_chmap_elem *map;
2402 substream = snd_pcm_chmap_substream(info, idx);
2405 memset(ucontrol->value.integer.value, 0,
2406 sizeof(long) * info->max_channels);
2407 if (!substream->runtime)
2408 return 0; /* no channels set */
2409 for (map = info->chmap; map->channels; map++) {
2411 if (map->channels == substream->runtime->channels &&
2412 valid_chmap_channels(info, map->channels)) {
2413 for (i = 0; i < map->channels; i++)
2414 ucontrol->value.integer.value[i] = map->map[i];
2421 /* tlv callback for channel map ctl element
2422 * expands the pre-defined channel maps in a form of TLV
2424 static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2425 unsigned int size, unsigned int __user *tlv)
2427 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2428 const struct snd_pcm_chmap_elem *map;
2429 unsigned int __user *dst;
2436 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2440 for (map = info->chmap; map->channels; map++) {
2441 int chs_bytes = map->channels * 4;
2442 if (!valid_chmap_channels(info, map->channels))
2446 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2447 put_user(chs_bytes, dst + 1))
2452 if (size < chs_bytes)
2456 for (c = 0; c < map->channels; c++) {
2457 if (put_user(map->map[c], dst))
2462 if (put_user(count, tlv + 1))
2467 static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2469 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2470 info->pcm->streams[info->stream].chmap_kctl = NULL;
2475 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2476 * @pcm: the assigned PCM instance
2477 * @stream: stream direction
2478 * @chmap: channel map elements (for query)
2479 * @max_channels: the max number of channels for the stream
2480 * @private_value: the value passed to each kcontrol's private_value field
2481 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2483 * Create channel-mapping control elements assigned to the given PCM stream(s).
2484 * Return: Zero if successful, or a negative error value.
2486 int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2487 const struct snd_pcm_chmap_elem *chmap,
2489 unsigned long private_value,
2490 struct snd_pcm_chmap **info_ret)
2492 struct snd_pcm_chmap *info;
2493 struct snd_kcontrol_new knew = {
2494 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2495 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2496 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2497 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2498 .info = pcm_chmap_ctl_info,
2499 .get = pcm_chmap_ctl_get,
2500 .tlv.c = pcm_chmap_ctl_tlv,
2504 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2506 info = kzalloc(sizeof(*info), GFP_KERNEL);
2510 info->stream = stream;
2511 info->chmap = chmap;
2512 info->max_channels = max_channels;
2513 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2514 knew.name = "Playback Channel Map";
2516 knew.name = "Capture Channel Map";
2517 knew.device = pcm->device;
2518 knew.count = pcm->streams[stream].substream_count;
2519 knew.private_value = private_value;
2520 info->kctl = snd_ctl_new1(&knew, info);
2525 info->kctl->private_free = pcm_chmap_ctl_private_free;
2526 err = snd_ctl_add(pcm->card, info->kctl);
2529 pcm->streams[stream].chmap_kctl = info->kctl;
2534 EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
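
/*
 * Example (illustrative sketch, compiled out): exposing the standard channel
 * maps for a playback stream with up to 8 channels; NULL is passed as the
 * last argument when the caller does not need the returned info pointer.
 * "example_add_chmaps" is a hypothetical helper.
 */
#if 0
static int example_add_chmaps(struct snd_pcm *pcm)
{
	return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				      snd_pcm_std_chmaps, 8, 0, NULL);
}
#endif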