1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Digital Audio (PCM) abstract layer
8 #include <linux/slab.h>
9 #include <linux/sched/signal.h>
10 #include <linux/time.h>
11 #include <linux/math64.h>
12 #include <linux/export.h>
13 #include <sound/core.h>
14 #include <sound/control.h>
15 #include <sound/tlv.h>
16 #include <sound/info.h>
17 #include <sound/pcm.h>
18 #include <sound/pcm_params.h>
19 #include <sound/timer.h>
21 #include "pcm_local.h"
23 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
24 #define CREATE_TRACE_POINTS
25 #include "pcm_trace.h"
27 #define trace_hwptr(substream, pos, in_interrupt)
28 #define trace_xrun(substream)
29 #define trace_hw_ptr_error(substream, reason)
30 #define trace_applptr(substream, prev, curr)
33 static int fill_silence_frames(struct snd_pcm_substream *substream,
34 snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
37 static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
38 snd_pcm_uframes_t ptr,
39 snd_pcm_uframes_t new_ptr)
41 snd_pcm_sframes_t delta;
43 delta = new_ptr - ptr;
47 delta += runtime->boundary;
48 if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
49 runtime->silence_filled -= delta;
51 runtime->silence_filled = 0;
52 runtime->silence_start = new_ptr;
56 * fill ring buffer with silence
57 * runtime->silence_start: starting pointer to silence area
58 * runtime->silence_filled: size filled with silence
59 * runtime->silence_threshold: threshold from application
60 * runtime->silence_size: maximal size from application
62 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
64 void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
66 struct snd_pcm_runtime *runtime = substream->runtime;
67 snd_pcm_uframes_t frames, ofs, transfer;
70 if (runtime->silence_size < runtime->boundary) {
71 snd_pcm_sframes_t noise_dist;
72 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
73 update_silence_vars(runtime, runtime->silence_start, appl_ptr);
74 /* initialization outside pointer updates */
75 if (new_hw_ptr == ULONG_MAX)
76 new_hw_ptr = runtime->status->hw_ptr;
77 /* get hw_avail with the boundary crossing */
78 noise_dist = appl_ptr - new_hw_ptr;
80 noise_dist += runtime->boundary;
81 /* total noise distance */
82 noise_dist += runtime->silence_filled;
83 if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
85 frames = runtime->silence_threshold - noise_dist;
86 if (frames > runtime->silence_size)
87 frames = runtime->silence_size;
90 * This filling mode aims at free-running mode (used for example by dmix),
91 * which doesn't update the application pointer.
93 snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
94 if (new_hw_ptr == ULONG_MAX) {
96 * Initialization, fill the whole unused buffer with silence.
98 * Usually, this is entered while stopped, before data is queued,
99 * so both pointers are expected to be zero.
101 snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
103 avail += runtime->boundary;
105 * In free-running mode, appl_ptr will be zero even while running,
106 * so we end up with a huge number. There is no useful way to
107 * handle this, so we just clear the whole buffer.
109 runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
110 runtime->silence_start = hw_ptr;
112 /* Silence the just played area immediately */
113 update_silence_vars(runtime, hw_ptr, new_hw_ptr);
116 * In this mode, silence_filled actually includes the valid
117 * sample data from the user.
119 frames = runtime->buffer_size - runtime->silence_filled;
121 if (snd_BUG_ON(frames > runtime->buffer_size))
125 ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
127 transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
128 err = fill_silence_frames(substream, ofs, transfer);
130 runtime->silence_filled += transfer;
133 } while (frames > 0);
134 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
137 #ifdef CONFIG_SND_DEBUG
138 void snd_pcm_debug_name(struct snd_pcm_substream *substream,
139 char *name, size_t len)
141 snprintf(name, len, "pcmC%dD%d%c:%d",
142 substream->pcm->card->number,
143 substream->pcm->device,
144 substream->stream ? 'c' : 'p',
147 EXPORT_SYMBOL(snd_pcm_debug_name);
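/*
 * Illustrative usage sketch (not part of this file): drivers typically use
 * snd_pcm_debug_name() to build a short stream name for log messages, as the
 * XRUN handling below does.  The 16-byte buffer matches the local usage here.
 *
 *	char name[16];
 *
 *	snd_pcm_debug_name(substream, name, sizeof(name));
 *	pcm_dbg(substream->pcm, "stream %s: ...\n", name);
 */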
150 #define XRUN_DEBUG_BASIC (1<<0)
151 #define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
152 #define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
154 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
156 #define xrun_debug(substream, mask) \
157 ((substream)->pstr->xrun_debug & (mask))
159 #define xrun_debug(substream, mask) 0
162 #define dump_stack_on_xrun(substream) do { \
163 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
167 /* call with stream lock held */
168 void __snd_pcm_xrun(struct snd_pcm_substream *substream)
170 struct snd_pcm_runtime *runtime = substream->runtime;
172 trace_xrun(substream);
173 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
174 struct timespec64 tstamp;
176 snd_pcm_gettime(runtime, &tstamp);
177 runtime->status->tstamp.tv_sec = tstamp.tv_sec;
178 runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
180 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
181 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
183 snd_pcm_debug_name(substream, name, sizeof(name));
184 pcm_warn(substream->pcm, "XRUN: %s\n", name);
185 dump_stack_on_xrun(substream);
187 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
188 substream->xrun_counter++;
192 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
193 #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
195 trace_hw_ptr_error(substream, reason); \
196 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
197 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
198 (in_interrupt) ? 'Q' : 'P', ##args); \
199 dump_stack_on_xrun(substream); \
203 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
205 #define hw_ptr_error(substream, fmt, args...) do { } while (0)
209 int snd_pcm_update_state(struct snd_pcm_substream *substream,
210 struct snd_pcm_runtime *runtime)
212 snd_pcm_uframes_t avail;
214 avail = snd_pcm_avail(substream);
215 if (avail > runtime->avail_max)
216 runtime->avail_max = avail;
217 if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
218 if (avail >= runtime->buffer_size) {
219 snd_pcm_drain_done(substream);
223 if (avail >= runtime->stop_threshold) {
224 __snd_pcm_xrun(substream);
228 if (runtime->twake) {
229 if (avail >= runtime->twake)
230 wake_up(&runtime->tsleep);
231 } else if (avail >= runtime->control->avail_min)
232 wake_up(&runtime->sleep);
236 static void update_audio_tstamp(struct snd_pcm_substream *substream,
237 struct timespec64 *curr_tstamp,
238 struct timespec64 *audio_tstamp)
240 struct snd_pcm_runtime *runtime = substream->runtime;
241 u64 audio_frames, audio_nsecs;
242 struct timespec64 driver_tstamp;
244 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
247 if (!(substream->ops->get_time_info) ||
248 (runtime->audio_tstamp_report.actual_type ==
249 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
252 * provide audio timestamp derived from pointer position
253 * add delay only if requested
256 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
258 if (runtime->audio_tstamp_config.report_delay) {
259 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
260 audio_frames -= runtime->delay;
262 audio_frames += runtime->delay;
264 audio_nsecs = div_u64(audio_frames * 1000000000LL,
266 *audio_tstamp = ns_to_timespec64(audio_nsecs);
269 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
270 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
271 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
272 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
273 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
274 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
279 * re-take a driver timestamp to let apps detect if the reference tstamp
280 * read by low-level hardware was provided with a delay
282 snd_pcm_gettime(substream->runtime, &driver_tstamp);
283 runtime->driver_tstamp = driver_tstamp;
286 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
287 unsigned int in_interrupt)
289 struct snd_pcm_runtime *runtime = substream->runtime;
290 snd_pcm_uframes_t pos;
291 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
292 snd_pcm_sframes_t hdelta, delta;
293 unsigned long jdelta;
294 unsigned long curr_jiffies;
295 struct timespec64 curr_tstamp;
296 struct timespec64 audio_tstamp;
297 int crossed_boundary = 0;
299 old_hw_ptr = runtime->status->hw_ptr;
302 * group pointer, time and jiffies reads to allow for more
303 * accurate correlations/corrections.
304 * The values are stored at the end of this routine after
305 * corrections for hw_ptr position
307 pos = substream->ops->pointer(substream);
308 curr_jiffies = jiffies;
309 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
310 if ((substream->ops->get_time_info) &&
311 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
312 substream->ops->get_time_info(substream, &curr_tstamp,
314 &runtime->audio_tstamp_config,
315 &runtime->audio_tstamp_report);
317 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
318 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
319 snd_pcm_gettime(runtime, &curr_tstamp);
321 snd_pcm_gettime(runtime, &curr_tstamp);
324 if (pos == SNDRV_PCM_POS_XRUN) {
325 __snd_pcm_xrun(substream);
328 if (pos >= runtime->buffer_size) {
329 if (printk_ratelimit()) {
331 snd_pcm_debug_name(substream, name, sizeof(name));
332 pcm_err(substream->pcm,
333 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
334 name, pos, runtime->buffer_size,
335 runtime->period_size);
339 pos -= pos % runtime->min_align;
340 trace_hwptr(substream, pos, in_interrupt);
341 hw_base = runtime->hw_ptr_base;
342 new_hw_ptr = hw_base + pos;
344 /* we know that one period was processed */
345 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
346 delta = runtime->hw_ptr_interrupt + runtime->period_size;
347 if (delta > new_hw_ptr) {
348 /* check for double acknowledged interrupts */
349 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
350 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
351 hw_base += runtime->buffer_size;
352 if (hw_base >= runtime->boundary) {
356 new_hw_ptr = hw_base + pos;
361 /* new_hw_ptr might be lower than old_hw_ptr when the pointer */
362 /* crosses the end of the ring buffer */
363 if (new_hw_ptr < old_hw_ptr) {
364 hw_base += runtime->buffer_size;
365 if (hw_base >= runtime->boundary) {
369 new_hw_ptr = hw_base + pos;
372 delta = new_hw_ptr - old_hw_ptr;
374 delta += runtime->boundary;
376 if (runtime->no_period_wakeup) {
377 snd_pcm_sframes_t xrun_threshold;
379 * Without regular period interrupts, we have to check
380 * the elapsed time to detect xruns.
382 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
383 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
385 hdelta = jdelta - delta * HZ / runtime->rate;
386 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
387 while (hdelta > xrun_threshold) {
388 delta += runtime->buffer_size;
389 hw_base += runtime->buffer_size;
390 if (hw_base >= runtime->boundary) {
394 new_hw_ptr = hw_base + pos;
395 hdelta -= runtime->hw_ptr_buffer_jiffies;
400 /* something must be really wrong */
401 if (delta >= runtime->buffer_size + runtime->period_size) {
402 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
403 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
404 substream->stream, (long)pos,
405 (long)new_hw_ptr, (long)old_hw_ptr);
409 /* Do jiffies check only in xrun_debug mode */
410 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
411 goto no_jiffies_check;
413 /* Skip the jiffies check for hardware with the BATCH flag.
414 * Such hardware usually just increases the position at each IRQ,
415 * thus it can't give any strange position.
417 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
418 goto no_jiffies_check;
420 if (hdelta < runtime->delay)
421 goto no_jiffies_check;
422 hdelta -= runtime->delay;
423 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
424 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
426 (((runtime->period_size * HZ) / runtime->rate)
428 /* move new_hw_ptr according to jiffies, not the pos variable */
429 new_hw_ptr = old_hw_ptr;
431 /* use loop to avoid checks for delta overflows */
432 /* the delta value is small or zero in most cases */
434 new_hw_ptr += runtime->period_size;
435 if (new_hw_ptr >= runtime->boundary) {
436 new_hw_ptr -= runtime->boundary;
441 /* align hw_base to buffer_size */
442 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
443 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
444 (long)pos, (long)hdelta,
445 (long)runtime->period_size, jdelta,
446 ((hdelta * HZ) / runtime->rate), hw_base,
447 (unsigned long)old_hw_ptr,
448 (unsigned long)new_hw_ptr);
449 /* reset values to proper state */
451 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
454 if (delta > runtime->period_size + runtime->period_size / 2) {
455 hw_ptr_error(substream, in_interrupt,
457 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
458 substream->stream, (long)delta,
464 if (runtime->status->hw_ptr == new_hw_ptr) {
465 runtime->hw_ptr_jiffies = curr_jiffies;
466 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
470 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
471 runtime->silence_size > 0)
472 snd_pcm_playback_silence(substream, new_hw_ptr);
475 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
477 delta += runtime->boundary;
478 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
479 runtime->hw_ptr_interrupt += delta;
480 if (runtime->hw_ptr_interrupt >= runtime->boundary)
481 runtime->hw_ptr_interrupt -= runtime->boundary;
483 runtime->hw_ptr_base = hw_base;
484 runtime->status->hw_ptr = new_hw_ptr;
485 runtime->hw_ptr_jiffies = curr_jiffies;
486 if (crossed_boundary) {
487 snd_BUG_ON(crossed_boundary != 1);
488 runtime->hw_ptr_wrap += runtime->boundary;
491 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
493 return snd_pcm_update_state(substream, runtime);
496 /* CAUTION: call it with irq disabled */
497 int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
499 return snd_pcm_update_hw_ptr0(substream, 0);
503 * snd_pcm_set_ops - set the PCM operators
504 * @pcm: the pcm instance
505 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
506 * @ops: the operator table
508 * Sets the given PCM operators to the pcm instance.
510 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
511 const struct snd_pcm_ops *ops)
513 struct snd_pcm_str *stream = &pcm->streams[direction];
514 struct snd_pcm_substream *substream;
516 for (substream = stream->substream; substream != NULL; substream = substream->next)
517 substream->ops = ops;
519 EXPORT_SYMBOL(snd_pcm_set_ops);
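/*
 * Illustrative usage sketch (not part of this file): a typical driver assigns
 * its operator tables right after creating the PCM instance in its probe code.
 * The "foo_*" names are hypothetical.
 *
 *	err = snd_pcm_new(card, "Foo PCM", 0, 1, 1, &pcm);
 *	if (err < 0)
 *		return err;
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &foo_playback_ops);
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &foo_capture_ops);
 */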
522 * snd_pcm_set_sync_per_card - set the PCM sync id with card number
523 * @substream: the pcm substream
524 * @params: modified hardware parameters
525 * @id: identifier (max 12 bytes)
526 * @len: identifier length (max 12 bytes)
528 * Sets the PCM sync identifier for the card with zero padding.
530 * User space (or any other user) should use this 16-byte identifier only for comparison,
531 * to check whether two IDs are equal or different. A special case is an identifier
532 * containing only zeros, which is interpreted as empty (not set).
533 * The contents of the identifier should not be interpreted in any other way.
535 * The synchronization ID must be unique per clock source (usually one sound card,
536 * but multiple sound cards may share one PCM word clock source, which means that
537 * they are fully synchronized).
539 * This routine composes the ID from the card number in the first four bytes and
540 * the 12-byte additional ID. When another ID composition is used (e.g. for multiple
541 * sound cards), make sure that the composition does not clash with this
542 * composition scheme.
544 void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream,
545 struct snd_pcm_hw_params *params,
546 const unsigned char *id, unsigned int len)
548 *(__u32 *)params->sync = cpu_to_le32(substream->pcm->card->number);
550 memcpy(params->sync + 4, id, len);
551 memset(params->sync + 4 + len, 0, 12 - len);
553 EXPORT_SYMBOL_GPL(snd_pcm_set_sync_per_card);
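/*
 * Illustrative usage sketch (assumption, not from this file): a driver whose
 * streams share a hardware word clock could publish a stable sync ID, e.g.
 * from its .hw_params callback; "foo_clock_id" is a hypothetical identifier.
 * IDs shorter than 12 bytes are zero-padded by the helper.
 *
 *	static const unsigned char foo_clock_id[8] = {
 *		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0
 *	};
 *
 *	snd_pcm_set_sync_per_card(substream, params,
 *				  foo_clock_id, sizeof(foo_clock_id));
 */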
556 * Standard ioctl routine
559 static inline unsigned int div32(unsigned int a, unsigned int b,
570 static inline unsigned int div_down(unsigned int a, unsigned int b)
577 static inline unsigned int div_up(unsigned int a, unsigned int b)
589 static inline unsigned int mul(unsigned int a, unsigned int b)
593 if (div_down(UINT_MAX, a) < b)
598 static inline unsigned int muldiv32(unsigned int a, unsigned int b,
599 unsigned int c, unsigned int *r)
601 u_int64_t n = (u_int64_t) a * b;
606 n = div_u64_rem(n, c, r);
615 * snd_interval_refine - refine the interval value of configurator
616 * @i: the interval value to refine
617 * @v: the interval value to refer to
619 * Refines the interval value with the reference value.
620 * The interval is changed to the range satisfying both intervals.
621 * The interval status (min, max, integer, etc.) is evaluated.
623 * Return: Positive if the value is changed, zero if it's not changed, or a
624 * negative error code.
626 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
629 if (snd_BUG_ON(snd_interval_empty(i)))
631 if (i->min < v->min) {
633 i->openmin = v->openmin;
635 } else if (i->min == v->min && !i->openmin && v->openmin) {
639 if (i->max > v->max) {
641 i->openmax = v->openmax;
643 } else if (i->max == v->max && !i->openmax && v->openmax) {
647 if (!i->integer && v->integer) {
660 } else if (!i->openmin && !i->openmax && i->min == i->max)
662 if (snd_interval_checkempty(i)) {
663 snd_interval_none(i);
668 EXPORT_SYMBOL(snd_interval_refine);
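/*
 * Illustrative sketch (not part of this file): refining one interval with
 * another narrows it to the intersection of the two ranges.  With the values
 * below, "i" ends up as the integer range [44100, 48000] and the call returns
 * a positive value because "i" changed.
 *
 *	struct snd_interval i = { .min = 8000, .max = 96000 };
 *	struct snd_interval v = { .min = 44100, .max = 48000, .integer = 1 };
 *	int changed = snd_interval_refine(&i, &v);
 */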
670 static int snd_interval_refine_first(struct snd_interval *i)
672 const unsigned int last_max = i->max;
674 if (snd_BUG_ON(snd_interval_empty(i)))
676 if (snd_interval_single(i))
681 /* only exclude max value if also excluded before refine */
682 i->openmax = (i->openmax && i->max >= last_max);
686 static int snd_interval_refine_last(struct snd_interval *i)
688 const unsigned int last_min = i->min;
690 if (snd_BUG_ON(snd_interval_empty(i)))
692 if (snd_interval_single(i))
697 /* only exclude min value if also excluded before refine */
698 i->openmin = (i->openmin && i->min <= last_min);
702 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
704 if (a->empty || b->empty) {
705 snd_interval_none(c);
709 c->min = mul(a->min, b->min);
710 c->openmin = (a->openmin || b->openmin);
711 c->max = mul(a->max, b->max);
712 c->openmax = (a->openmax || b->openmax);
713 c->integer = (a->integer && b->integer);
717 * snd_interval_div - refine the interval value with division
724 * Returns non-zero if the value is changed, zero if not changed.
726 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
729 if (a->empty || b->empty) {
730 snd_interval_none(c);
734 c->min = div32(a->min, b->max, &r);
735 c->openmin = (r || a->openmin || b->openmax);
737 c->max = div32(a->max, b->min, &r);
742 c->openmax = (a->openmax || b->openmin);
751 * snd_interval_muldivk - refine the interval value
754 * @k: divisor (as integer)
759 * Returns non-zero if the value is changed, zero if not changed.
761 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
762 unsigned int k, struct snd_interval *c)
765 if (a->empty || b->empty) {
766 snd_interval_none(c);
770 c->min = muldiv32(a->min, b->min, k, &r);
771 c->openmin = (r || a->openmin || b->openmin);
772 c->max = muldiv32(a->max, b->max, k, &r);
777 c->openmax = (a->openmax || b->openmax);
782 * snd_interval_mulkdiv - refine the interval value
784 * @k: dividend 2 (as integer)
790 * Returns non-zero if the value is changed, zero if not changed.
792 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
793 const struct snd_interval *b, struct snd_interval *c)
796 if (a->empty || b->empty) {
797 snd_interval_none(c);
801 c->min = muldiv32(a->min, k, b->max, &r);
802 c->openmin = (r || a->openmin || b->openmax);
804 c->max = muldiv32(a->max, k, b->min, &r);
809 c->openmax = (a->openmax || b->openmin);
821 * snd_interval_ratnum - refine the interval value
822 * @i: interval to refine
823 * @rats_count: number of ratnum_t
824 * @rats: ratnum_t array
825 * @nump: pointer to store the resultant numerator
826 * @denp: pointer to store the resultant denominator
828 * Return: Positive if the value is changed, zero if it's not changed, or a
829 * negative error code.
831 int snd_interval_ratnum(struct snd_interval *i,
832 unsigned int rats_count, const struct snd_ratnum *rats,
833 unsigned int *nump, unsigned int *denp)
835 unsigned int best_num, best_den;
838 struct snd_interval t;
840 unsigned int result_num, result_den;
843 best_num = best_den = best_diff = 0;
844 for (k = 0; k < rats_count; ++k) {
845 unsigned int num = rats[k].num;
847 unsigned int q = i->min;
851 den = div_up(num, q);
852 if (den < rats[k].den_min)
854 if (den > rats[k].den_max)
855 den = rats[k].den_max;
858 r = (den - rats[k].den_min) % rats[k].den_step;
862 diff = num - q * den;
866 diff * best_den < best_diff * den) {
876 t.min = div_down(best_num, best_den);
877 t.openmin = !!(best_num % best_den);
879 result_num = best_num;
880 result_diff = best_diff;
881 result_den = best_den;
882 best_num = best_den = best_diff = 0;
883 for (k = 0; k < rats_count; ++k) {
884 unsigned int num = rats[k].num;
886 unsigned int q = i->max;
892 den = div_down(num, q);
893 if (den > rats[k].den_max)
895 if (den < rats[k].den_min)
896 den = rats[k].den_min;
899 r = (den - rats[k].den_min) % rats[k].den_step;
901 den += rats[k].den_step - r;
903 diff = q * den - num;
907 diff * best_den < best_diff * den) {
917 t.max = div_up(best_num, best_den);
918 t.openmax = !!(best_num % best_den);
920 err = snd_interval_refine(i, &t);
924 if (snd_interval_single(i)) {
925 if (best_diff * result_den < result_diff * best_den) {
926 result_num = best_num;
927 result_den = best_den;
936 EXPORT_SYMBOL(snd_interval_ratnum);
939 * snd_interval_ratden - refine the interval value
940 * @i: interval to refine
941 * @rats_count: number of struct ratden
942 * @rats: struct ratden array
943 * @nump: pointer to store the resultant numerator
944 * @denp: pointer to store the resultant denominator
946 * Return: Positive if the value is changed, zero if it's not changed, or a
947 * negative error code.
949 static int snd_interval_ratden(struct snd_interval *i,
950 unsigned int rats_count,
951 const struct snd_ratden *rats,
952 unsigned int *nump, unsigned int *denp)
954 unsigned int best_num, best_diff, best_den;
956 struct snd_interval t;
959 best_num = best_den = best_diff = 0;
960 for (k = 0; k < rats_count; ++k) {
962 unsigned int den = rats[k].den;
963 unsigned int q = i->min;
966 if (num > rats[k].num_max)
968 if (num < rats[k].num_min)
969 num = rats[k].num_max;
972 r = (num - rats[k].num_min) % rats[k].num_step;
974 num += rats[k].num_step - r;
976 diff = num - q * den;
978 diff * best_den < best_diff * den) {
988 t.min = div_down(best_num, best_den);
989 t.openmin = !!(best_num % best_den);
991 best_num = best_den = best_diff = 0;
992 for (k = 0; k < rats_count; ++k) {
994 unsigned int den = rats[k].den;
995 unsigned int q = i->max;
998 if (num < rats[k].num_min)
1000 if (num > rats[k].num_max)
1001 num = rats[k].num_max;
1004 r = (num - rats[k].num_min) % rats[k].num_step;
1008 diff = q * den - num;
1009 if (best_num == 0 ||
1010 diff * best_den < best_diff * den) {
1016 if (best_den == 0) {
1020 t.max = div_up(best_num, best_den);
1021 t.openmax = !!(best_num % best_den);
1023 err = snd_interval_refine(i, &t);
1027 if (snd_interval_single(i)) {
1037 * snd_interval_list - refine the interval value from the list
1038 * @i: the interval value to refine
1039 * @count: the number of elements in the list
1040 * @list: the value list
1041 * @mask: the bit-mask to evaluate
1043 * Refines the interval value from the list.
1044 * When mask is non-zero, only the elements corresponding to bit 1 are evaluated.
1047 * Return: Positive if the value is changed, zero if it's not changed, or a
1048 * negative error code.
1050 int snd_interval_list(struct snd_interval *i, unsigned int count,
1051 const unsigned int *list, unsigned int mask)
1054 struct snd_interval list_range;
1060 snd_interval_any(&list_range);
1061 list_range.min = UINT_MAX;
1063 for (k = 0; k < count; k++) {
1064 if (mask && !(mask & (1 << k)))
1066 if (!snd_interval_test(i, list[k]))
1068 list_range.min = min(list_range.min, list[k]);
1069 list_range.max = max(list_range.max, list[k]);
1071 return snd_interval_refine(i, &list_range);
1073 EXPORT_SYMBOL(snd_interval_list);
1076 * snd_interval_ranges - refine the interval value from the list of ranges
1077 * @i: the interval value to refine
1078 * @count: the number of elements in the list of ranges
1079 * @ranges: the ranges list
1080 * @mask: the bit-mask to evaluate
1082 * Refines the interval value from the list of ranges.
1083 * When mask is non-zero, only the elements corresponding to bit 1 are evaluated.
1086 * Return: Positive if the value is changed, zero if it's not changed, or a
1087 * negative error code.
1089 int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1090 const struct snd_interval *ranges, unsigned int mask)
1093 struct snd_interval range_union;
1094 struct snd_interval range;
1097 snd_interval_none(i);
1100 snd_interval_any(&range_union);
1101 range_union.min = UINT_MAX;
1102 range_union.max = 0;
1103 for (k = 0; k < count; k++) {
1104 if (mask && !(mask & (1 << k)))
1106 snd_interval_copy(&range, &ranges[k]);
1107 if (snd_interval_refine(&range, i) < 0)
1109 if (snd_interval_empty(&range))
1112 if (range.min < range_union.min) {
1113 range_union.min = range.min;
1114 range_union.openmin = 1;
1116 if (range.min == range_union.min && !range.openmin)
1117 range_union.openmin = 0;
1118 if (range.max > range_union.max) {
1119 range_union.max = range.max;
1120 range_union.openmax = 1;
1122 if (range.max == range_union.max && !range.openmax)
1123 range_union.openmax = 0;
1125 return snd_interval_refine(i, &range_union);
1127 EXPORT_SYMBOL(snd_interval_ranges);
1129 static int snd_interval_step(struct snd_interval *i, unsigned int step)
1134 if (n != 0 || i->openmin) {
1140 if (n != 0 || i->openmax) {
1145 if (snd_interval_checkempty(i)) {
1152 /* Info constraints helpers */
1155 * snd_pcm_hw_rule_add - add the hw-constraint rule
1156 * @runtime: the pcm runtime instance
1157 * @cond: condition bits
1158 * @var: the variable to evaluate
1159 * @func: the evaluation function
1160 * @private: the private data pointer passed to function
1161 * @dep: the dependent variables
1163 * Return: Zero if successful, or a negative error code on failure.
1165 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1167 snd_pcm_hw_rule_func_t func, void *private,
1170 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1171 struct snd_pcm_hw_rule *c;
1174 va_start(args, dep);
1175 if (constrs->rules_num >= constrs->rules_all) {
1176 struct snd_pcm_hw_rule *new;
1177 unsigned int new_rules = constrs->rules_all + 16;
1178 new = krealloc_array(constrs->rules, new_rules,
1179 sizeof(*c), GFP_KERNEL);
1184 constrs->rules = new;
1185 constrs->rules_all = new_rules;
1187 c = &constrs->rules[constrs->rules_num];
1191 c->private = private;
1194 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1201 dep = va_arg(args, int);
1203 constrs->rules_num++;
1207 EXPORT_SYMBOL(snd_pcm_hw_rule_add);
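/*
 * Illustrative usage sketch (assumption, not from this file): a driver can add
 * a custom rule that couples two parameters, e.g. limiting the channel count
 * when a high sample rate is selected.  The "foo_" names and the 48 kHz limit
 * are hypothetical.
 *
 *	static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					     struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .integer = 1 };
 *
 *		t.max = (r->min > 48000) ? 2 : 8;
 *		return snd_interval_refine(c, &t);
 *	}
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  foo_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */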
1210 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1211 * @runtime: PCM runtime instance
1212 * @var: hw_params variable to apply the mask
1213 * @mask: the bitmap mask
1215 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1217 * Return: Zero if successful, or a negative error code on failure.
1219 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1222 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1223 struct snd_mask *maskp = constrs_mask(constrs, var);
1224 *maskp->bits &= mask;
1225 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1226 if (*maskp->bits == 0)
1232 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1233 * @runtime: PCM runtime instance
1234 * @var: hw_params variable to apply the mask
1235 * @mask: the 64bit bitmap mask
1237 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1239 * Return: Zero if successful, or a negative error code on failure.
1241 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1244 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1245 struct snd_mask *maskp = constrs_mask(constrs, var);
1246 maskp->bits[0] &= (u_int32_t)mask;
1247 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1248 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1249 if (! maskp->bits[0] && ! maskp->bits[1])
1253 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1256 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1257 * @runtime: PCM runtime instance
1258 * @var: hw_params variable to apply the integer constraint
1260 * Apply the constraint of integer to an interval parameter.
1262 * Return: Positive if the value is changed, zero if it's not changed, or a
1263 * negative error code.
1265 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1267 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1268 return snd_interval_setinteger(constrs_interval(constrs, var));
1270 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
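/*
 * Illustrative usage sketch (not part of this file): the most common use is
 * forcing the number of periods to be an integer, typically from a driver's
 * .open callback.
 *
 *	err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 */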
1273 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1274 * @runtime: PCM runtime instance
1275 * @var: hw_params variable to apply the range
1276 * @min: the minimal value
1277 * @max: the maximal value
1279 * Apply the min/max range constraint to an interval parameter.
1281 * Return: Positive if the value is changed, zero if it's not changed, or a
1282 * negative error code.
1284 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1285 unsigned int min, unsigned int max)
1287 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1288 struct snd_interval t;
1291 t.openmin = t.openmax = 0;
1293 return snd_interval_refine(constrs_interval(constrs, var), &t);
1295 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
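/*
 * Illustrative usage sketch (assumption, not from this file): restricting the
 * buffer size to a range the hardware can address; the byte limits below are
 * hypothetical.
 *
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   4096, 64 * 1024);
 */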
1297 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1298 struct snd_pcm_hw_rule *rule)
1300 struct snd_pcm_hw_constraint_list *list = rule->private;
1301 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1306 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1307 * @runtime: PCM runtime instance
1308 * @cond: condition bits
1309 * @var: hw_params variable to apply the list constraint
1312 * Apply the list of constraints to an interval parameter.
1314 * Return: Zero if successful, or a negative error code on failure.
1316 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1318 snd_pcm_hw_param_t var,
1319 const struct snd_pcm_hw_constraint_list *l)
1321 return snd_pcm_hw_rule_add(runtime, cond, var,
1322 snd_pcm_hw_rule_list, (void *)l,
1325 EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
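/*
 * Illustrative usage sketch (not part of this file): hardware that supports
 * only a few discrete rates registers them via a constant list, usually from
 * the driver's .open callback; the rate values and "foo_" names are
 * hypothetical.
 *
 *	static const unsigned int foo_rates[] = { 8000, 16000, 44100, 48000 };
 *	static const struct snd_pcm_hw_constraint_list foo_rate_list = {
 *		.count = ARRAY_SIZE(foo_rates),
 *		.list = foo_rates,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &foo_rate_list);
 */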
1327 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1328 struct snd_pcm_hw_rule *rule)
1330 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1331 return snd_interval_ranges(hw_param_interval(params, rule->var),
1332 r->count, r->ranges, r->mask);
1337 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1338 * @runtime: PCM runtime instance
1339 * @cond: condition bits
1340 * @var: hw_params variable to apply the list of range constraints
1343 * Apply the list of range constraints to an interval parameter.
1345 * Return: Zero if successful, or a negative error code on failure.
1347 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1349 snd_pcm_hw_param_t var,
1350 const struct snd_pcm_hw_constraint_ranges *r)
1352 return snd_pcm_hw_rule_add(runtime, cond, var,
1353 snd_pcm_hw_rule_ranges, (void *)r,
1356 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1358 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1359 struct snd_pcm_hw_rule *rule)
1361 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1362 unsigned int num = 0, den = 0;
1364 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1365 r->nrats, r->rats, &num, &den);
1366 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1367 params->rate_num = num;
1368 params->rate_den = den;
1374 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1375 * @runtime: PCM runtime instance
1376 * @cond: condition bits
1377 * @var: hw_params variable to apply the ratnums constraint
1378 * @r: struct snd_ratnums constraints
1380 * Return: Zero if successful, or a negative error code on failure.
1382 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1384 snd_pcm_hw_param_t var,
1385 const struct snd_pcm_hw_constraint_ratnums *r)
1387 return snd_pcm_hw_rule_add(runtime, cond, var,
1388 snd_pcm_hw_rule_ratnums, (void *)r,
1391 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
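/*
 * Illustrative usage sketch (assumption, not from this file): hardware that
 * derives its sample rate from a master clock divided by an integer can
 * describe the valid rates with a struct snd_ratnum; the clock values below
 * are hypothetical.
 *
 *	static const struct snd_ratnum foo_clock = {
 *		.num = 8000000,
 *		.den_min = 83,
 *		.den_max = 1000,
 *		.den_step = 1,
 *	};
 *	static const struct snd_pcm_hw_constraint_ratnums foo_clocks = {
 *		.nrats = 1,
 *		.rats = &foo_clock,
 *	};
 *
 *	err = snd_pcm_hw_constraint_ratnums(runtime, 0,
 *					    SNDRV_PCM_HW_PARAM_RATE,
 *					    &foo_clocks);
 */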
1393 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1394 struct snd_pcm_hw_rule *rule)
1396 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1397 unsigned int num = 0, den = 0;
1398 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1399 r->nrats, r->rats, &num, &den);
1400 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1401 params->rate_num = num;
1402 params->rate_den = den;
1408 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1409 * @runtime: PCM runtime instance
1410 * @cond: condition bits
1411 * @var: hw_params variable to apply the ratdens constraint
1412 * @r: struct snd_ratdens constraints
1414 * Return: Zero if successful, or a negative error code on failure.
1416 int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1418 snd_pcm_hw_param_t var,
1419 const struct snd_pcm_hw_constraint_ratdens *r)
1421 return snd_pcm_hw_rule_add(runtime, cond, var,
1422 snd_pcm_hw_rule_ratdens, (void *)r,
1425 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1427 static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1428 struct snd_pcm_hw_rule *rule)
1430 unsigned int l = (unsigned long) rule->private;
1431 int width = l & 0xffff;
1432 unsigned int msbits = l >> 16;
1433 const struct snd_interval *i =
1434 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1436 if (!snd_interval_single(i))
1439 if ((snd_interval_value(i) == width) ||
1440 (width == 0 && snd_interval_value(i) > msbits))
1441 params->msbits = min_not_zero(params->msbits, msbits);
1447 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1448 * @runtime: PCM runtime instance
1449 * @cond: condition bits
1450 * @width: sample bits width
1451 * @msbits: msbits width
1453 * This constraint will set the number of most significant bits (msbits) if a
1454 * sample format with the specified width has been selected. If width is set to 0,
1455 * the msbits will be set for any sample format with a width larger than the specified msbits.
1458 * Return: Zero if successful, or a negative error code on failure.
1460 int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1463 unsigned int msbits)
1465 unsigned long l = (msbits << 16) | width;
1466 return snd_pcm_hw_rule_add(runtime, cond, -1,
1467 snd_pcm_hw_rule_msbits,
1469 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1471 EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
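/*
 * Illustrative usage sketch (not part of this file): hardware that transfers
 * 32-bit samples but only resolves the top 24 bits would register
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *
 * so that params->msbits is reported as 24 whenever a 32-bit format is chosen.
 */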
1473 static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1474 struct snd_pcm_hw_rule *rule)
1476 unsigned long step = (unsigned long) rule->private;
1477 return snd_interval_step(hw_param_interval(params, rule->var), step);
1481 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1482 * @runtime: PCM runtime instance
1483 * @cond: condition bits
1484 * @var: hw_params variable to apply the step constraint
1487 * Return: Zero if successful, or a negative error code on failure.
1489 int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1491 snd_pcm_hw_param_t var,
1494 return snd_pcm_hw_rule_add(runtime, cond, var,
1495 snd_pcm_hw_rule_step, (void *) step,
1498 EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1500 static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1502 static const unsigned int pow2_sizes[] = {
1503 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1504 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1505 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1506 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1508 return snd_interval_list(hw_param_interval(params, rule->var),
1509 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1513 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1514 * @runtime: PCM runtime instance
1515 * @cond: condition bits
1516 * @var: hw_params variable to apply the power-of-2 constraint
1518 * Return: Zero if successful, or a negative error code on failure.
1520 int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1522 snd_pcm_hw_param_t var)
1524 return snd_pcm_hw_rule_add(runtime, cond, var,
1525 snd_pcm_hw_rule_pow2, NULL,
1528 EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1530 static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1531 struct snd_pcm_hw_rule *rule)
1533 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1534 struct snd_interval *rate;
1536 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1537 return snd_interval_list(rate, 1, &base_rate, 0);
1541 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1542 * @runtime: PCM runtime instance
1543 * @base_rate: the rate at which the hardware does not resample
1545 * Return: Zero if successful, or a negative error code on failure.
1547 int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1548 unsigned int base_rate)
1550 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1551 SNDRV_PCM_HW_PARAM_RATE,
1552 snd_pcm_hw_rule_noresample_func,
1553 (void *)(uintptr_t)base_rate,
1554 SNDRV_PCM_HW_PARAM_RATE, -1);
1556 EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
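/*
 * Illustrative usage sketch (not part of this file): a driver whose hardware
 * runs natively at 48 kHz can let applications opt out of rate conversion with
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 *
 * The rule only takes effect when the application sets
 * SNDRV_PCM_HW_PARAMS_NORESAMPLE in its hw_params request.
 */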
1558 static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1559 snd_pcm_hw_param_t var)
1561 if (hw_is_mask(var)) {
1562 snd_mask_any(hw_param_mask(params, var));
1563 params->cmask |= 1 << var;
1564 params->rmask |= 1 << var;
1567 if (hw_is_interval(var)) {
1568 snd_interval_any(hw_param_interval(params, var));
1569 params->cmask |= 1 << var;
1570 params->rmask |= 1 << var;
1576 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1579 memset(params, 0, sizeof(*params));
1580 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1581 _snd_pcm_hw_param_any(params, k);
1582 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1583 _snd_pcm_hw_param_any(params, k);
1586 EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1589 * snd_pcm_hw_param_value - return @params field @var value
1590 * @params: the hw_params instance
1591 * @var: parameter to retrieve
1592 * @dir: pointer to the direction (-1,0,1) or %NULL
1594 * Return: The value for field @var if it's fixed in configuration space
1595 * defined by @params. -%EINVAL otherwise.
1597 int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1598 snd_pcm_hw_param_t var, int *dir)
1600 if (hw_is_mask(var)) {
1601 const struct snd_mask *mask = hw_param_mask_c(params, var);
1602 if (!snd_mask_single(mask))
1606 return snd_mask_value(mask);
1608 if (hw_is_interval(var)) {
1609 const struct snd_interval *i = hw_param_interval_c(params, var);
1610 if (!snd_interval_single(i))
1614 return snd_interval_value(i);
1618 EXPORT_SYMBOL(snd_pcm_hw_param_value);
1620 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1621 snd_pcm_hw_param_t var)
1623 if (hw_is_mask(var)) {
1624 snd_mask_none(hw_param_mask(params, var));
1625 params->cmask |= 1 << var;
1626 params->rmask |= 1 << var;
1627 } else if (hw_is_interval(var)) {
1628 snd_interval_none(hw_param_interval(params, var));
1629 params->cmask |= 1 << var;
1630 params->rmask |= 1 << var;
1635 EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1637 static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1638 snd_pcm_hw_param_t var)
1641 if (hw_is_mask(var))
1642 changed = snd_mask_refine_first(hw_param_mask(params, var));
1643 else if (hw_is_interval(var))
1644 changed = snd_interval_refine_first(hw_param_interval(params, var));
1648 params->cmask |= 1 << var;
1649 params->rmask |= 1 << var;
1656 * snd_pcm_hw_param_first - refine config space and return minimum value
1657 * @pcm: PCM instance
1658 * @params: the hw_params instance
1659 * @var: parameter to retrieve
1660 * @dir: pointer to the direction (-1,0,1) or %NULL
1662 * Inside configuration space defined by @params remove from @var all
1663 * values > minimum. Reduce configuration space accordingly.
1665 * Return: The minimum, or a negative error code on failure.
1667 int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1668 struct snd_pcm_hw_params *params,
1669 snd_pcm_hw_param_t var, int *dir)
1671 int changed = _snd_pcm_hw_param_first(params, var);
1674 if (params->rmask) {
1675 int err = snd_pcm_hw_refine(pcm, params);
1679 return snd_pcm_hw_param_value(params, var, dir);
1681 EXPORT_SYMBOL(snd_pcm_hw_param_first);
1683 static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1684 snd_pcm_hw_param_t var)
1687 if (hw_is_mask(var))
1688 changed = snd_mask_refine_last(hw_param_mask(params, var));
1689 else if (hw_is_interval(var))
1690 changed = snd_interval_refine_last(hw_param_interval(params, var));
1694 params->cmask |= 1 << var;
1695 params->rmask |= 1 << var;
1702 * snd_pcm_hw_param_last - refine config space and return maximum value
1703 * @pcm: PCM instance
1704 * @params: the hw_params instance
1705 * @var: parameter to retrieve
1706 * @dir: pointer to the direction (-1,0,1) or %NULL
1708 * Inside configuration space defined by @params remove from @var all
1709 * values < maximum. Reduce configuration space accordingly.
1711 * Return: The maximum, or a negative error code on failure.
1713 int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1714 struct snd_pcm_hw_params *params,
1715 snd_pcm_hw_param_t var, int *dir)
1717 int changed = _snd_pcm_hw_param_last(params, var);
1720 if (params->rmask) {
1721 int err = snd_pcm_hw_refine(pcm, params);
1725 return snd_pcm_hw_param_value(params, var, dir);
1727 EXPORT_SYMBOL(snd_pcm_hw_param_last);
1730 * snd_pcm_hw_params_bits - Get the number of bits per sample.
1731 * @p: hardware parameters
1733 * Return: The number of bits per sample, based on the format, subformat
1734 * and msbits of the given hw params.
1736 int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p)
1738 snd_pcm_subformat_t subformat = params_subformat(p);
1739 snd_pcm_format_t format = params_format(p);
1742 case SNDRV_PCM_FORMAT_S32_LE:
1743 case SNDRV_PCM_FORMAT_U32_LE:
1744 case SNDRV_PCM_FORMAT_S32_BE:
1745 case SNDRV_PCM_FORMAT_U32_BE:
1746 switch (subformat) {
1747 case SNDRV_PCM_SUBFORMAT_MSBITS_20:
1749 case SNDRV_PCM_SUBFORMAT_MSBITS_24:
1751 case SNDRV_PCM_SUBFORMAT_MSBITS_MAX:
1752 case SNDRV_PCM_SUBFORMAT_STD:
1758 return snd_pcm_format_width(format);
1761 EXPORT_SYMBOL(snd_pcm_hw_params_bits);
1763 static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1766 struct snd_pcm_runtime *runtime = substream->runtime;
1768 guard(pcm_stream_lock_irqsave)(substream);
1769 if (snd_pcm_running(substream) &&
1770 snd_pcm_update_hw_ptr(substream) >= 0)
1771 runtime->status->hw_ptr %= runtime->buffer_size;
1773 runtime->status->hw_ptr = 0;
1774 runtime->hw_ptr_wrap = 0;
1779 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1782 struct snd_pcm_channel_info *info = arg;
1783 struct snd_pcm_runtime *runtime = substream->runtime;
1785 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1789 width = snd_pcm_format_physical_width(runtime->format);
1793 switch (runtime->access) {
1794 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1795 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1796 info->first = info->channel * width;
1797 info->step = runtime->channels * width;
1799 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1800 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1802 size_t size = runtime->dma_bytes / runtime->channels;
1803 info->first = info->channel * size * 8;
1814 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1817 struct snd_pcm_hw_params *params = arg;
1818 snd_pcm_format_t format;
1822 params->fifo_size = substream->runtime->hw.fifo_size;
1823 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1824 format = params_format(params);
1825 channels = params_channels(params);
1826 frame_size = snd_pcm_format_size(format, channels);
1828 params->fifo_size /= frame_size;
1833 static int snd_pcm_lib_ioctl_sync_id(struct snd_pcm_substream *substream,
1836 static const unsigned char id[12] = { 0xff, 0xff, 0xff, 0xff,
1837 0xff, 0xff, 0xff, 0xff,
1838 0xff, 0xff, 0xff, 0xff };
1840 if (substream->runtime->std_sync_id)
1841 snd_pcm_set_sync_per_card(substream, arg, id, sizeof(id));
1846 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1847 * @substream: the pcm substream instance
1848 * @cmd: ioctl command
1849 * @arg: ioctl argument
1851 * Processes the generic ioctl commands for PCM.
1852 * Can be passed as the ioctl callback for PCM ops.
1854 * Return: Zero if successful, or a negative error code on failure.
1856 int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1857 unsigned int cmd, void *arg)
1860 case SNDRV_PCM_IOCTL1_RESET:
1861 return snd_pcm_lib_ioctl_reset(substream, arg);
1862 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1863 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1864 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1865 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1866 case SNDRV_PCM_IOCTL1_SYNC_ID:
1867 return snd_pcm_lib_ioctl_sync_id(substream, arg);
1871 EXPORT_SYMBOL(snd_pcm_lib_ioctl);
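/*
 * Illustrative usage sketch (not part of this file): drivers that do not need
 * any special ioctl handling can simply point the .ioctl field of their
 * snd_pcm_ops at this helper; the "foo_" names are hypothetical.
 *
 *	static const struct snd_pcm_ops foo_playback_ops = {
 *		.open =		foo_playback_open,
 *		.close =	foo_playback_close,
 *		.ioctl =	snd_pcm_lib_ioctl,
 *		.hw_params =	foo_hw_params,
 *		.prepare =	foo_prepare,
 *		.trigger =	foo_trigger,
 *		.pointer =	foo_pointer,
 *	};
 */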
1874 * snd_pcm_period_elapsed_under_stream_lock() - update the runtime status for the next period
1875 * while the lock of the PCM substream is already held.
1876 * @substream: the PCM substream instance.
1878 * This function is called when a batch of audio data frames of the same size as the buffer
1879 * period has been processed in the audio data transfer.
1881 * The call updates the runtime status with the latest position of the audio data transfer,
1882 * checks for overrun and underrun of the buffer, wakes up user processes waiting for
1883 * available audio data frames, samples the audio timestamp, and stops or drains the PCM
1884 * substream according to the configured thresholds.
1886 * The function is intended for the case where the PCM driver handles audio data frames with
1887 * the PCM substream lock already acquired, e.g. in a callback of any &snd_pcm_ops operation
1888 * in process context. In interrupt context, it is preferable to use ``snd_pcm_period_elapsed()``
1889 * instead, since the PCM substream lock would otherwise have to be acquired in advance.
1891 * Developers should be aware that some callbacks in &snd_pcm_ops are invoked by the call of this function:
1894 * - .pointer - to retrieve the current position of the audio data transfer by frame count or XRUN state.
1895 * - .trigger - with SNDRV_PCM_TRIGGER_STOP in the XRUN or DRAINING state.
1896 * - .get_time_info - to retrieve the audio timestamp if needed.
1898 * Even if more than one period has elapsed since the last call, you need to call this only once.
1900 void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1902 struct snd_pcm_runtime *runtime;
1904 if (PCM_RUNTIME_CHECK(substream))
1906 runtime = substream->runtime;
1908 if (!snd_pcm_running(substream) ||
1909 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1912 #ifdef CONFIG_SND_PCM_TIMER
1913 if (substream->timer_running)
1914 snd_timer_interrupt(substream->timer, 1);
1917 snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1919 EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1922 * snd_pcm_period_elapsed() - update the runtime status for the next period, acquiring the lock of
1923 * the PCM substream itself.
1924 * @substream: the PCM substream instance.
1926 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
1927 * it acquires the lock of the PCM substream by itself.
1929 * It is typically called from an IRQ handler when a hardware IRQ signals that a batch of audio
1930 * data frames of the same size as the buffer period has been processed in the audio data
1931 * transfer.
1933 void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1935 if (snd_BUG_ON(!substream))
1938 guard(pcm_stream_lock_irqsave)(substream);
1939 snd_pcm_period_elapsed_under_stream_lock(substream);
1941 EXPORT_SYMBOL(snd_pcm_period_elapsed);
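/*
 * Illustrative usage sketch (not part of this file): a typical interrupt
 * handler acknowledges the hardware IRQ and then notifies the PCM core once
 * per elapsed period; the "foo_" names are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		if (!foo_irq_pending(chip))
 *			return IRQ_NONE;
 *		foo_ack_irq(chip);
 *		if (chip->running)
 *			snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */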
1944 * Wait until avail_min data becomes available
1945 * Returns a negative error code if any error occurs during operation.
1946 * The available space is stored in *availp. When err = 0 and avail = 0
1947 * on the capture stream, it indicates the stream is in DRAINING state.
1949 static int wait_for_avail(struct snd_pcm_substream *substream,
1950 snd_pcm_uframes_t *availp)
1952 struct snd_pcm_runtime *runtime = substream->runtime;
1953 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1954 wait_queue_entry_t wait;
1956 snd_pcm_uframes_t avail = 0;
1957 long wait_time, tout;
1959 init_waitqueue_entry(&wait, current);
1960 set_current_state(TASK_INTERRUPTIBLE);
1961 add_wait_queue(&runtime->tsleep, &wait);
1963 if (runtime->no_period_wakeup)
1964 wait_time = MAX_SCHEDULE_TIMEOUT;
1966 /* use wait time from substream if available */
1967 if (substream->wait_time) {
1968 wait_time = substream->wait_time;
1972 if (runtime->rate) {
1973 long t = runtime->buffer_size * 1100 / runtime->rate;
1974 wait_time = max(t, wait_time);
1977 wait_time = msecs_to_jiffies(wait_time);
1981 if (signal_pending(current)) {
1987 * We need to check if space became available already
1988 * (and thus the wakeup happened already) first to close
1989 * the race of space already having become available.
1990 * This check must happen after being added to the waitqueue
1991 * and having current state be INTERRUPTIBLE.
1993 avail = snd_pcm_avail(substream);
1994 if (avail >= runtime->twake)
1996 snd_pcm_stream_unlock_irq(substream);
1998 tout = schedule_timeout(wait_time);
2000 snd_pcm_stream_lock_irq(substream);
2001 set_current_state(TASK_INTERRUPTIBLE);
2002 switch (runtime->state) {
2003 case SNDRV_PCM_STATE_SUSPENDED:
2006 case SNDRV_PCM_STATE_XRUN:
2009 case SNDRV_PCM_STATE_DRAINING:
2013 avail = 0; /* indicate draining */
2015 case SNDRV_PCM_STATE_OPEN:
2016 case SNDRV_PCM_STATE_SETUP:
2017 case SNDRV_PCM_STATE_DISCONNECTED:
2020 case SNDRV_PCM_STATE_PAUSED:
2024 pcm_dbg(substream->pcm,
2025 "%s timeout (DMA or IRQ trouble?)\n",
2026 is_playback ? "playback write" : "capture read");
2032 set_current_state(TASK_RUNNING);
2033 remove_wait_queue(&runtime->tsleep, &wait);
2038 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
2039 int channel, unsigned long hwoff,
2040 struct iov_iter *iter, unsigned long bytes);
2042 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
2043 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
2046 /* calculate the target DMA-buffer position to be written/read */
2047 static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
2048 int channel, unsigned long hwoff)
2050 return runtime->dma_area + hwoff +
2051 channel * (runtime->dma_bytes / runtime->channels);
2054 /* default copy ops for write; used for both interleaved and non-interleaved modes */
2055 static int default_write_copy(struct snd_pcm_substream *substream,
2056 int channel, unsigned long hwoff,
2057 struct iov_iter *iter, unsigned long bytes)
2059 if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2060 bytes, iter) != bytes)
2065 /* fill silence instead of copying data; called as a transfer helper
2066 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when
2067 * a NULL buffer is passed
2069 static int fill_silence(struct snd_pcm_substream *substream, int channel,
2070 unsigned long hwoff, struct iov_iter *iter,
2071 unsigned long bytes)
2073 struct snd_pcm_runtime *runtime = substream->runtime;
2075 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2077 if (substream->ops->fill_silence)
2078 return substream->ops->fill_silence(substream, channel,
2081 snd_pcm_format_set_silence(runtime->format,
2082 get_dma_ptr(runtime, channel, hwoff),
2083 bytes_to_samples(runtime, bytes));
2087 /* default copy ops for read; used for both interleaved and non-interleaved modes */
2088 static int default_read_copy(struct snd_pcm_substream *substream,
2089 int channel, unsigned long hwoff,
2090 struct iov_iter *iter, unsigned long bytes)
2092 if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2093 bytes, iter) != bytes)
2098 /* call transfer with the filled iov_iter */
2099 static int do_transfer(struct snd_pcm_substream *substream, int c,
2100 unsigned long hwoff, void *data, unsigned long bytes,
2101 pcm_transfer_f transfer, bool in_kernel)
2103 struct iov_iter iter;
2106 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2112 struct kvec kvec = { data, bytes };
2114 iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2115 return transfer(substream, c, hwoff, &iter, bytes);
2118 err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2121 return transfer(substream, c, hwoff, &iter, bytes);
2124 /* call transfer function with the converted pointers and sizes;
2125 * for interleaved mode, it's one shot for all samples
2127 static int interleaved_copy(struct snd_pcm_substream *substream,
2128 snd_pcm_uframes_t hwoff, void *data,
2129 snd_pcm_uframes_t off,
2130 snd_pcm_uframes_t frames,
2131 pcm_transfer_f transfer,
2134 struct snd_pcm_runtime *runtime = substream->runtime;
2136 /* convert to bytes */
2137 hwoff = frames_to_bytes(runtime, hwoff);
2138 off = frames_to_bytes(runtime, off);
2139 frames = frames_to_bytes(runtime, frames);
2141 return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2145 /* call transfer function with the converted pointers and sizes for each
2146 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2148 static int noninterleaved_copy(struct snd_pcm_substream *substream,
2149 snd_pcm_uframes_t hwoff, void *data,
2150 snd_pcm_uframes_t off,
2151 snd_pcm_uframes_t frames,
2152 pcm_transfer_f transfer,
2155 struct snd_pcm_runtime *runtime = substream->runtime;
2156 int channels = runtime->channels;
2160 /* convert to bytes; note that it's not frames_to_bytes() here.
2161 * in non-interleaved mode, we copy for each channel, thus
2162 * each copy is n_samples bytes x channels = whole frames.
2164 off = samples_to_bytes(runtime, off);
2165 frames = samples_to_bytes(runtime, frames);
2166 hwoff = samples_to_bytes(runtime, hwoff);
2167 for (c = 0; c < channels; ++c, ++bufs) {
2168 if (!data || !*bufs)
2169 err = fill_silence(substream, c, hwoff, NULL, frames);
2171 err = do_transfer(substream, c, hwoff, *bufs + off,
2172 frames, transfer, in_kernel);
2179 /* fill silence on the given buffer position;
2180 * called from snd_pcm_playback_silence()
2182 static int fill_silence_frames(struct snd_pcm_substream *substream,
2183 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2185 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2186 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2187 return interleaved_copy(substream, off, NULL, 0, frames,
2188 fill_silence, true);
2190 return noninterleaved_copy(substream, off, NULL, 0, frames,
2191 fill_silence, true);
2194 /* sanity-check for read/write methods */
2195 static int pcm_sanity_check(struct snd_pcm_substream *substream)
2197 struct snd_pcm_runtime *runtime;
2198 if (PCM_RUNTIME_CHECK(substream))
2200 runtime = substream->runtime;
2201 if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2203 if (runtime->state == SNDRV_PCM_STATE_OPEN)
2208 static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2210 switch (runtime->state) {
2211 case SNDRV_PCM_STATE_PREPARED:
2212 case SNDRV_PCM_STATE_RUNNING:
2213 case SNDRV_PCM_STATE_PAUSED:
2215 case SNDRV_PCM_STATE_XRUN:
2217 case SNDRV_PCM_STATE_SUSPENDED:
2224 /* update to the given appl_ptr and call the ack callback if needed;
2225 * when an error is returned, roll back to the original value
2227 int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2228 snd_pcm_uframes_t appl_ptr)
2230 struct snd_pcm_runtime *runtime = substream->runtime;
2231 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2232 snd_pcm_sframes_t diff;
2235 if (old_appl_ptr == appl_ptr)
2238 if (appl_ptr >= runtime->boundary)
2241 * check if a rewind is requested by the application
2243 if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2244 diff = appl_ptr - old_appl_ptr;
2246 if (diff > runtime->buffer_size)
2249 if (runtime->boundary + diff > runtime->buffer_size)
2254 runtime->control->appl_ptr = appl_ptr;
2255 if (substream->ops->ack) {
2256 ret = substream->ops->ack(substream);
2258 runtime->control->appl_ptr = old_appl_ptr;
2260 __snd_pcm_xrun(substream);
2265 trace_applptr(substream, old_appl_ptr, appl_ptr);
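/*
 * Drivers that need to react to every appl_ptr move supply an .ack
 * callback in their snd_pcm_ops.  A minimal, purely hypothetical sketch
 * (mychip_advance_dma() is an assumed driver helper, not a real API):
 *
 *	static int mychip_pcm_ack(struct snd_pcm_substream *substream)
 *	{
 *		mychip_advance_dma(substream,
 *				   substream->runtime->control->appl_ptr);
 *		return 0;
 *	}
 *
 * When .ack fails, the code above restores the old appl_ptr; an -EPIPE
 * return additionally raises an XRUN via __snd_pcm_xrun().
 */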
2270 /* the common loop for read/write data */
2271 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2272 void *data, bool interleaved,
2273 snd_pcm_uframes_t size, bool in_kernel)
2275 struct snd_pcm_runtime *runtime = substream->runtime;
2276 snd_pcm_uframes_t xfer = 0;
2277 snd_pcm_uframes_t offset = 0;
2278 snd_pcm_uframes_t avail;
2280 pcm_transfer_f transfer;
2285 err = pcm_sanity_check(substream);
2289 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2291 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2292 runtime->channels > 1)
2294 writer = interleaved_copy;
2296 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2298 writer = noninterleaved_copy;
2303 transfer = fill_silence;
2307 if (substream->ops->copy)
2308 transfer = substream->ops->copy;
2310 transfer = is_playback ?
2311 default_write_copy : default_read_copy;
2317 nonblock = !!(substream->f_flags & O_NONBLOCK);
2319 snd_pcm_stream_lock_irq(substream);
2320 err = pcm_accessible_state(runtime);
2324 runtime->twake = runtime->control->avail_min ? : 1;
2325 if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2326 snd_pcm_update_hw_ptr(substream);
2329 * If size < start_threshold, wait indefinitely; another
2330 * thread may start the capture stream.
2333 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2334 size >= runtime->start_threshold) {
2335 err = snd_pcm_start(substream);
2340 avail = snd_pcm_avail(substream);
2343 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2344 snd_pcm_uframes_t cont;
2347 runtime->state == SNDRV_PCM_STATE_DRAINING) {
2348 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2355 runtime->twake = min_t(snd_pcm_uframes_t, size,
2356 runtime->control->avail_min ? : 1);
2357 err = wait_for_avail(substream, &avail);
2361 continue; /* draining */
2363 frames = size > avail ? avail : size;
2364 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2365 appl_ofs = appl_ptr % runtime->buffer_size;
2366 cont = runtime->buffer_size - appl_ofs;
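/*
 * The actual copy below runs with the stream lock dropped: the
 * buffer_accessing counter keeps hw_params/hw_free from tearing down the
 * buffer in the meantime, and the DMA buffer is synced for CPU access
 * before the transfer and handed back to the device afterwards.
 */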
2369 if (snd_BUG_ON(!frames)) {
2373 if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2377 snd_pcm_stream_unlock_irq(substream);
2379 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2380 err = writer(substream, appl_ofs, data, offset, frames,
2381 transfer, in_kernel);
2383 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2384 snd_pcm_stream_lock_irq(substream);
2385 atomic_dec(&runtime->buffer_accessing);
2388 err = pcm_accessible_state(runtime);
2392 if (appl_ptr >= runtime->boundary)
2393 appl_ptr -= runtime->boundary;
2394 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2403 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2404 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2405 err = snd_pcm_start(substream);
2412 if (xfer > 0 && err >= 0)
2413 snd_pcm_update_state(substream, runtime);
2414 snd_pcm_stream_unlock_irq(substream);
2415 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2417 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
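/*
 * Note: drivers do not call __snd_pcm_lib_xfer() directly; the inline
 * wrappers in <sound/pcm.h> (snd_pcm_lib_write(), snd_pcm_lib_read(),
 * snd_pcm_kernel_write(), snd_pcm_kernel_read() and their vector
 * variants) fill in the interleaved/in_kernel arguments appropriately.
 */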
2420 * standard channel mapping helpers
2423 /* default channel maps for multi-channel playback, up to 8 channels */
2424 const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2426 .map = { SNDRV_CHMAP_MONO } },
2428 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2430 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2431 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2433 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2434 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2435 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2437 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2438 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2439 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2440 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2443 EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2445 /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2446 const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2448 .map = { SNDRV_CHMAP_MONO } },
2450 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2452 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2453 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2455 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2456 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2457 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2459 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2460 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2461 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2462 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2465 EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
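/*
 * The standard and alternative tables differ only in the 6- and
 * 8-channel layouts: the standard maps place the rear pair before
 * FC/LFE, while the alternative maps put FC/LFE right after the front
 * pair for hardware that wires C/LFE on the second channel pair.
 */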
2467 static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2469 if (ch > info->max_channels)
2471 return !info->channel_mask || (info->channel_mask & (1U << ch));
2474 static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2475 struct snd_ctl_elem_info *uinfo)
2477 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2479 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2480 uinfo->count = info->max_channels;
2481 uinfo->value.integer.min = 0;
2482 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2486 /* get callback for channel map ctl element
2487 * stores the channel positions of the first map matching the current channel count
2489 static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2490 struct snd_ctl_elem_value *ucontrol)
2492 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2493 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2494 struct snd_pcm_substream *substream;
2495 const struct snd_pcm_chmap_elem *map;
2499 substream = snd_pcm_chmap_substream(info, idx);
2502 memset(ucontrol->value.integer.value, 0,
2503 sizeof(long) * info->max_channels);
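/* values stay at 0 (SNDRV_CHMAP_UNKNOWN) unless a matching map is found below */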
2504 if (!substream->runtime)
2505 return 0; /* no channels set */
2506 for (map = info->chmap; map->channels; map++) {
2508 if (map->channels == substream->runtime->channels &&
2509 valid_chmap_channels(info, map->channels)) {
2510 for (i = 0; i < map->channels; i++)
2511 ucontrol->value.integer.value[i] = map->map[i];
2518 /* tlv callback for channel map ctl element
2519 * expands the pre-defined channel maps into TLV form
2521 static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2522 unsigned int size, unsigned int __user *tlv)
2524 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2525 const struct snd_pcm_chmap_elem *map;
2526 unsigned int __user *dst;
2533 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2537 for (map = info->chmap; map->channels; map++) {
2538 int chs_bytes = map->channels * 4;
2539 if (!valid_chmap_channels(info, map->channels))
2543 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2544 put_user(chs_bytes, dst + 1))
2549 if (size < chs_bytes)
2553 for (c = 0; c < map->channels; c++) {
2554 if (put_user(map->map[c], dst))
2559 if (put_user(count, tlv + 1))
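/*
 * The resulting TLV is thus a SNDRV_CTL_TLVT_CONTAINER holding one
 * SNDRV_CTL_TLVT_CHMAP_FIXED entry per usable map, each carrying its
 * byte length and the channel positions; the total payload size is
 * written back to tlv[1] once all entries have been emitted.
 */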
2564 static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2566 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2567 info->pcm->streams[info->stream].chmap_kctl = NULL;
2572 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2573 * @pcm: the assigned PCM instance
2574 * @stream: stream direction
2575 * @chmap: channel map elements (for query)
2576 * @max_channels: the max number of channels for the stream
2577 * @private_value: the value passed to each kcontrol's private_value field
2578 * @info_ret: where to store the allocated struct snd_pcm_chmap instance, if non-NULL
2580 * Create channel-mapping control elements assigned to the given PCM stream(s).
2581 * Return: Zero if successful, or a negative error value.
2583 int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2584 const struct snd_pcm_chmap_elem *chmap,
2586 unsigned long private_value,
2587 struct snd_pcm_chmap **info_ret)
2589 struct snd_pcm_chmap *info;
2590 struct snd_kcontrol_new knew = {
2591 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2592 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2593 SNDRV_CTL_ELEM_ACCESS_VOLATILE |
2594 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2595 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2596 .info = pcm_chmap_ctl_info,
2597 .get = pcm_chmap_ctl_get,
2598 .tlv.c = pcm_chmap_ctl_tlv,
2602 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2604 info = kzalloc(sizeof(*info), GFP_KERNEL);
2608 info->stream = stream;
2609 info->chmap = chmap;
2610 info->max_channels = max_channels;
2611 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2612 knew.name = "Playback Channel Map";
2614 knew.name = "Capture Channel Map";
2615 knew.device = pcm->device;
2616 knew.count = pcm->streams[stream].substream_count;
2617 knew.private_value = private_value;
2618 info->kctl = snd_ctl_new1(&knew, info);
2623 info->kctl->private_free = pcm_chmap_ctl_private_free;
2624 err = snd_ctl_add(pcm->card, info->kctl);
2627 pcm->streams[stream].chmap_kctl = info->kctl;
2632 EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
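/*
 * Typical driver usage, e.g. attaching the standard maps to a playback
 * stream after the PCM has been created (a minimal sketch; error
 * handling beyond the return is omitted):
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 *
 * Passing a non-NULL info_ret hands back the allocated struct
 * snd_pcm_chmap so the caller can tweak e.g. channel_mask afterwards.
 */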