This patch adds pm_runtime_get/put calls to ensure that any access to the
I2S registers is done with the I2S device in the proper (active) runtime PM
state. Until now the driver enabled runtime PM but did not manage the
device's state during driver operation. It worked only because the runtime
PM callbacks managed the device clock, which was enabled all the time due to
the additional enable call in the driver's probe function.
Signed-off-by: Marek Szyprowski <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
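For context, the pattern applied throughout the diff below is the usual runtime PM bracket around register access in a DAI callback: take a reference with pm_runtime_get_sync() before touching the hardware, and drop it with pm_runtime_put() on every exit path, including error paths. A minimal sketch of that pattern, with placeholder names (my_dai_op, struct my_priv, MY_REG, MY_BUSY and MY_CFG are illustrative, not symbols from this driver):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>

/* Placeholder private data; the real driver keeps far more state. */
struct my_priv {
	void __iomem *addr;
};

#define MY_REG	0x00		/* illustrative register offset */
#define MY_BUSY	BIT(0)		/* illustrative "busy" flag */
#define MY_CFG	BIT(4)		/* illustrative configuration bit */

static int my_dai_op(struct snd_soc_dai *dai)
{
	struct my_priv *priv = snd_soc_dai_get_drvdata(dai);
	int ret = 0;

	/* Resume the device (and its clock) before any readl/writel. */
	pm_runtime_get_sync(dai->dev);

	if (readl(priv->addr + MY_REG) & MY_BUSY) {
		ret = -EAGAIN;		/* error paths still drop the PM reference */
		goto out;
	}
	writel(MY_CFG, priv->addr + MY_REG);
out:
	pm_runtime_put(dai->dev);	/* let the device suspend again */
	return ret;
}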
unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
u32 mod, mask, val = 0;
unsigned long flags;
+ int ret = 0;
+
+ pm_runtime_get_sync(dai->dev);
spin_lock_irqsave(i2s->lock, flags);
mod = readl(i2s->addr + I2SMOD);
&& (mod & cdcon_mask))))) {
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
+ ret = -EAGAIN;
+ goto err;
}
if (dir == SND_SOC_CLOCK_IN)
} else {
i2s->rclk_srcrate =
clk_get_rate(i2s->op_clk);
i2s->op_clk = clk_get(&i2s->pdev->dev,
"i2s_opclk0");
- if (WARN_ON(IS_ERR(i2s->op_clk)))
- return PTR_ERR(i2s->op_clk);
+ if (WARN_ON(IS_ERR(i2s->op_clk))) {
+ ret = PTR_ERR(i2s->op_clk);
+ goto err;
+ }
clk_prepare_enable(i2s->op_clk);
i2s->rclk_srcrate = clk_get_rate(i2s->op_clk);
|| (clk_id && !(mod & rsrc_mask))) {
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
+ ret = -EAGAIN;
+ goto err;
} else {
/* Call can't be on the active DAI */
i2s->op_clk = other->op_clk;
i2s->rclk_srcrate = other->rclk_srcrate;
break;
default:
dev_err(&i2s->pdev->dev, "We don't serve that!\n");
+ ret = -EINVAL;
+ goto err;
}
spin_lock_irqsave(i2s->lock, flags);
mod = (mod & ~mask) | val;
writel(mod, i2s->addr + I2SMOD);
spin_unlock_irqrestore(i2s->lock, flags);
+done:
+ pm_runtime_put(dai->dev);
+ return 0;
+err:
+ pm_runtime_put(dai->dev);
+ return ret;
}
static int i2s_set_fmt(struct snd_soc_dai *dai,
+ pm_runtime_get_sync(dai->dev);
spin_lock_irqsave(i2s->lock, flags);
mod = readl(i2s->addr + I2SMOD);
/*
if (any_active(i2s) &&
((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
spin_unlock_irqrestore(i2s->lock, flags);
+ pm_runtime_put(dai->dev);
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
mod |= tmp;
writel(mod, i2s->addr + I2SMOD);
spin_unlock_irqrestore(i2s->lock, flags);
+ pm_runtime_put(dai->dev);
u32 mod, mask = 0, val = 0;
unsigned long flags;
+ WARN_ON(!pm_runtime_active(dai->dev));
+
if (!is_secondary(i2s))
mask |= (MOD_DC2_EN | MOD_DC1_EN);
struct i2s_dai *other = get_other_dai(i2s);
unsigned long flags;
+ pm_runtime_get_sync(dai->dev);
+
spin_lock_irqsave(&lock, flags);
i2s->mode |= DAI_OPENED;
i2s->bfs = 0;
spin_unlock_irqrestore(&lock, flags);
+
+ pm_runtime_put(dai->dev);
}
static int config_setup(struct i2s_dai *i2s)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ pm_runtime_get_sync(dai->dev);
spin_lock_irqsave(i2s->lock, flags);
if (config_setup(i2s)) {
}
spin_unlock_irqrestore(i2s->lock, flags);
+ pm_runtime_put(dai->dev);
switch (div_id) {
case SAMSUNG_I2S_DIV_BCLK:
+ pm_runtime_get_sync(dai->dev);
if ((any_active(i2s) && div && (get_bfs(i2s) != div))
|| (other && other->bfs && (other->bfs != div))) {
+ pm_runtime_put(dai->dev);
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
}
i2s->bfs = div;
+ pm_runtime_put(dai->dev);
break;
default:
dev_err(&i2s->pdev->dev,
snd_pcm_sframes_t delay;
const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
+ WARN_ON(!pm_runtime_active(dai->dev));
+
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
delay = FIC_RXCOUNT(reg);
else if (is_secondary(i2s))
struct i2s_dai *other = get_other_dai(i2s);
unsigned long flags;
+ pm_runtime_get_sync(dai->dev);
+
if (is_secondary(i2s)) { /* If this is probe on the secondary DAI */
snd_soc_dai_init_dma_data(dai, &other->sec_dai->dma_playback,
NULL);
if (!is_opened(other))
i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
0, SND_SOC_CLOCK_IN);
+ pm_runtime_put(dai->dev);
struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
unsigned long flags;
+ pm_runtime_get_sync(dai->dev);
+
if (!is_secondary(i2s)) {
if (i2s->quirks & QUIRK_NEED_RSTCLR) {
spin_lock_irqsave(i2s->lock, flags);
+ pm_runtime_put(dai->dev);
+
dev_set_drvdata(&pdev->dev, pri_dai);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = i2s_register_clock_provider(pdev);
pri_dai->sec_dai = NULL;
sec_dai->pri_dai = NULL;
+ pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
i2s_unregister_clock_provider(pdev);
clk_disable_unprepare(pri_dai->clk);
+ pm_runtime_put_noidle(&pdev->dev);
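A note on the probe/remove ordering above, as I read the diff: probe leaves the device clock enabled, so pm_runtime_set_active() is called before pm_runtime_enable() to record that already-powered state with the PM core; on removal, pm_runtime_get_sync() keeps the device active during teardown, runtime PM is disabled, the clock provider is unregistered and the clock disabled, and pm_runtime_put_noidle() finally drops the usage count without invoking the now-disabled suspend path. A rough sketch of that shape, with placeholder names (my_probe/my_remove, struct my_priv and the "iis" clock id are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

/* Minimal placeholder state for the sketch. */
struct my_priv {
	struct clk *clk;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = devm_clk_get(&pdev->dev, "iis");
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	clk_prepare_enable(priv->clk);		/* device is powered from here on */
	platform_set_drvdata(pdev, priv);

	pm_runtime_set_active(&pdev->dev);	/* start runtime PM from the active state */
	pm_runtime_enable(&pdev->dev);

	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	struct my_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);	/* keep the device active during teardown */
	pm_runtime_disable(&pdev->dev);

	clk_disable_unprepare(priv->clk);

	pm_runtime_put_noidle(&pdev->dev);	/* balance the get without triggering suspend */

	return 0;
}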