/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <linux/irqdomain.h>
27 #include <linux/pm_domain.h>
28 #include <linux/platform_device.h>
29 #include <sound/designware_i2s.h>
30 #include <sound/pcm.h>
34 #include "amdgpu_acp.h"
36 #include "acp_gfx_if.h"
38 #define ACP_TILE_ON_MASK 0x03
39 #define ACP_TILE_OFF_MASK 0x02
40 #define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
41 #define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
43 #define ACP_TILE_P1_MASK 0x3e
44 #define ACP_TILE_P2_MASK 0x3d
45 #define ACP_TILE_DSP0_MASK 0x3b
46 #define ACP_TILE_DSP1_MASK 0x37
48 #define ACP_TILE_DSP2_MASK 0x2f
50 #define ACP_DMA_REGS_END 0x146c0
51 #define ACP_I2S_PLAY_REGS_START 0x14840
52 #define ACP_I2S_PLAY_REGS_END 0x148b4
53 #define ACP_I2S_CAP_REGS_START 0x148b8
54 #define ACP_I2S_CAP_REGS_END 0x1496c
56 #define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
57 #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
58 #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
59 #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
61 #define mmACP_PGFSM_RETAIN_REG 0x51c9
62 #define mmACP_PGFSM_CONFIG_REG 0x51ca
63 #define mmACP_PGFSM_READ_REG_0 0x51cc
65 #define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
66 #define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
67 #define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
68 #define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
70 #define mmACP_CONTROL 0x5131
71 #define mmACP_STATUS 0x5133
72 #define mmACP_SOFT_RESET 0x5134
73 #define ACP_CONTROL__ClkEn_MASK 0x1
74 #define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
75 #define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
76 #define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
77 #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
79 #define ACP_TIMEOUT_LOOP 0x000000FF
81 #define ACP_SRC_ID 162
/*
 * acp_sw_init - software init for the ACP IP block.
 * @handle: amdgpu_device pointer cast to void *
 *
 * Records the parent device and creates the CGS device used for all
 * ACP register access.  (Error-return path for a failed CGS-device
 * creation is not visible in this chunk.)
 */
static int acp_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
/*
 * acp_sw_fini - software teardown for the ACP IP block.
 * @handle: amdgpu_device pointer cast to void *
 *
 * Destroys the CGS device created in acp_sw_init(), if one exists.
 */
static int acp_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);
/*
 * acp_suspend_tile - power off a tile/block within ACP.
 * @cgs_dev: CGS device handle used for register access
 * @tile: tile index, must be in [ACP_TILE_P1, ACP_TILE_DSP2]
 *
 * Reads the tile's PGFSM status, programs the retain/config registers to
 * request power-down, then polls (up to ACP_TIMEOUT_LOOP iterations) for
 * the tile to report the OFF state.  Several statements (returns, loop
 * braces, early-exit paths) were dropped from this view of the file.
 */
static int acp_suspend_tile(void *cgs_dev, int tile)
	/* Reject tile indices outside the P1..DSP2 range. */
	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile : %d to suspend\n", tile);
	/* Current power state of this tile from the PGFSM read register. */
	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;
	/* Set this tile's bit in the retain register, then kick the
	 * PGFSM config register to start the power-down sequence.
	 */
	val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
	val = val | (1 << tile);
	cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
	/* Poll until the tile reports OFF, bounded by ACP_TIMEOUT_LOOP. */
	count = ACP_TIMEOUT_LOOP;
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
		val = val & ACP_TILE_ON_MASK;
		if (val == ACP_TILE_OFF_MASK)
		pr_err("Timeout reading ACP PGFSM status\n");
	/* Mark the tile's state as retained while powered off. */
	val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
	val |= ACP_TILE_OFF_RETAIN_REG_MASK;
	cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
/*
 * acp_resume_tile - power on a tile/block within ACP.
 * @cgs_dev: CGS device handle used for register access
 * @tile: tile index, must be in [ACP_TILE_P1, ACP_TILE_DSP2]
 *
 * Mirror of acp_suspend_tile(): kicks the PGFSM to power the tile up,
 * polls for the ON state, then clears the tile's retain bit (P1/P2 only
 * in the visible code).  Some statements are missing from this view.
 */
static int acp_resume_tile(void *cgs_dev, int tile)
	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile to resume\n");
	/* Current power state of this tile. */
	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;
	/* Request power-up via the PGFSM config register. */
	cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
	/* Poll until the tile reports ON, bounded by ACP_TIMEOUT_LOOP. */
	count = ACP_TIMEOUT_LOOP;
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
		val = val & ACP_TILE_ON_MASK;
	pr_err("Timeout reading ACP PGFSM status\n");
	/* Clear this tile's retain bit now that it is powered (masks are
	 * the all-ones value with the tile's bit cleared).
	 */
	val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
	if (tile == ACP_TILE_P1)
		val = val & (ACP_TILE_P1_MASK);
	else if (tile == ACP_TILE_P2)
		val = val & (ACP_TILE_P2_MASK);
	cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
/*
 * ACP power domain: embeds a generic PM domain so genpd callbacks can
 * recover the wrapper with container_of() (see acp_poweroff/acp_poweron).
 * The cgs_dev member used by those callbacks is not visible in this chunk.
 */
struct acp_pm_domain {
	struct generic_pm_domain gpd;
/*
 * acp_poweroff - genpd power_off callback for the ACP domain.
 * @genpd: generic PM domain embedded in struct acp_pm_domain
 *
 * Suspends all five ACP tiles (ACP_TILE_P1 + 0..4, i.e. P1, P2 and the
 * three DSP tiles) in reverse order.
 */
static int acp_poweroff(struct generic_pm_domain *genpd)
	struct acp_pm_domain *apd;

	/* Recover the wrapping acp_pm_domain from the embedded genpd. */
	apd = container_of(genpd, struct acp_pm_domain, gpd);

	/* Donot return abruptly if any of power tile fails to suspend.
	 * Log it and continue powering off other tile
	 */
	for (i = 4; i >= 0 ; i--) {
		ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			pr_err("ACP tile %d tile suspend failed\n", i);
/*
 * acp_poweron - genpd power_on callback for the ACP domain.
 * @genpd: generic PM domain embedded in struct acp_pm_domain
 *
 * Resumes the two power tiles (P1 and P2), then suspends the three DSP
 * tiles, which are unused, to save power.
 */
static int acp_poweron(struct generic_pm_domain *genpd)
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);

	/* Power on P1 and P2 (ACP_TILE_P1 + 0..1). */
	for (i = 0; i < 2; i++) {
		ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			pr_err("ACP tile %d resume failed\n", i);

	/* Disable DSPs which are not going to be used */
	for (i = 0; i < 3; i++) {
		ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
		/* Continue suspending other DSP, even if one fails */
			pr_err("ACP DSP %d suspend failed\n", i);
/*
 * get_mfd_cell_dev - find the platform device auto-created for an MFD cell.
 * @device_name: MFD cell name (e.g. "acp_audio_dma")
 * @r: cell instance index
 *
 * MFD registers cells on the platform bus as "<name>.<r>.auto"; look that
 * device up by name so it can be attached to the ACP PM domain.
 */
static struct device *get_mfd_cell_dev(const char *device_name, int r)
	char auto_dev_name[25];

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
/**
 * acp_hw_init - start and test ACP block
 *
 * @adev: amdgpu_device pointer
 */
static int acp_hw_init(void *handle)
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */

	/* Sanity-check that the MMIO window covers the ACP register space. */
	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)

	acp_base = adev->rmmio_base;

	/* On everything except Stoney, create a PM domain whose power_on /
	 * power_off callbacks manage the ACP tiles.
	 */
	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)

		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
		adev->acp.acp_genpd->gpd.power_on = acp_poweron;

		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	/* Allocate the MFD cell array, the 4 resources (DMA regs, I2S play
	 * regs, I2S capture regs, DMA IRQ) and the two designware-i2s
	 * platform-data slots (playback + capture).  Each allocation failure
	 * path frees what was allocated before it.
	 */
	adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,

	if (adev->acp.acp_cell == NULL)

	adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);

	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);

	i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);

	/* ASIC-specific quirks for the playback (i2s_pdata[0]) controller. */
	switch (adev->asic_type) {
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;

		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;

	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;

	/* ASIC-specific quirks for the capture (i2s_pdata[1]) controller. */
	switch (adev->asic_type) {
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;

		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	/* Resource 0: whole ACP DMA register window. */
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	/* Resource 1: I2S playback register window. */
	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	/* Resource 2: I2S capture register window. */
	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	/* Resource 3: the ACP DMA interrupt.
	 * NOTE(review): the literal 162 duplicates ACP_SRC_ID defined above;
	 * consider using the macro here.
	 */
	adev->acp.acp_res[3].name = "acp2x_dma_irq";
	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;

	/* Cell 0: ACP audio DMA engine; passes the ASIC type as pdata. */
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 4;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	/* Cell 1: designware I2S playback controller. */
	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	/* Cell 2: designware I2S capture controller. */
	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	/* Register all cells as hotplug platform devices under the parent. */
	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,

	/* Attach each auto-created MFD device to the ACP PM domain
	 * (skipped on Stoney, which did not create a genpd above).
	 */
	if (adev->asic_type != CHIP_STONEY) {
		for (i = 0; i < ACP_DEVS ; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
				dev_err(dev, "Failed to add dev to genpd\n");

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	/* Poll for the reset-done bit, bounded by the timeout count. */
	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
		dev_err(&adev->pdev->dev, "Failed to reset ACP\n");

	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		dev_err(&adev->pdev->dev, "Failed to reset ACP\n");

	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
/**
 * acp_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 */
static int acp_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_cell)

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	/* Poll for the reset-done bit, bounded by the timeout count. */
	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
		dev_err(&adev->pdev->dev, "Failed to reset ACP\n");

	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	/* Wait for the clock to actually stop, bounded by the timeout. */
	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		dev_err(&adev->pdev->dev, "Failed to reset ACP\n");

	/* Detach every MFD device from the PM domain before freeing it. */
	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS ; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			ret = pm_genpd_remove_device(dev);
			/* If removal fails, dont giveup and try rest */
				dev_err(dev, "remove dev from genpd failed\n");
		kfree(adev->acp.acp_genpd);

	/* Remove the MFD cells and free the hw_init allocations. */
	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
/* Remaining amd_ip_funcs callbacks for the ACP block.  Their bodies are
 * not visible in this chunk; presumably trivial stubs — verify against
 * the full file.
 */
static int acp_suspend(void *handle)

static int acp_resume(void *handle)

static int acp_early_init(void *handle)

static bool acp_is_idle(void *handle)

static int acp_wait_for_idle(void *handle)

static int acp_soft_reset(void *handle)

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
/* Dispatch table wiring the ACP callbacks into the amdgpu IP framework. */
static const struct amd_ip_funcs acp_ip_funcs = {
	.early_init = acp_early_init,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
/* IP-block version descriptor exported to the amdgpu IP-block list
 * (version fields not visible in this chunk).
 */
const struct amdgpu_ip_block_version acp_ip_block =
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.funcs = &acp_ip_funcs,