/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "hwmgr.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}
uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t)((6200 - (vid * 25)) / VOLTAGE_SCALE);
}
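/*
 * Worked example (illustrative; assumes VOLTAGE_SCALE == 4, i.e. the
 * 0.25 mV units used by SVI2): for vddc = 1000 mV,
 * convert_to_vid(1000) = (6200 - 1000 * 4) / 25 = 88, and
 * convert_to_vddc(88) = (6200 - 88 * 25) / 4 = 1000 mV round-trips.
 * The constants 6200 and 25 encode "1.55 V ceiling, 6.25 mV per VID
 * step" expressed in 0.25 mV units.
 */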
int phm_copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * power_saving_clock_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	/* pptable data is stored little-endian; convert to CPU byte order */
	for (i = 0; i < power_saving_clock_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}
int phm_copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * od_setting_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	for (i = 0; i < od_setting_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}
uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
	u32 mask = 0;
	u32 shift = 0;

	/* byte offset within the 32-bit word, converted to a bit shift */
	shift = (offset % 4) << 3;
	if (size == sizeof(uint8_t))
		mask = 0xFF << shift;
	else if (size == sizeof(uint16_t))
		mask = 0xFFFF << shift;

	original_data &= ~mask;
	original_data |= (field << shift);
	return original_data;
}
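/*
 * Illustration: phm_set_field_to_u32(2, 0x11223344, 0xAB, sizeof(uint8_t))
 * computes shift = 16 and mask = 0x00FF0000, so the result is 0x11AB3344.
 * The caller must pass a field already confined to `size` bytes; a wider
 * value would spill into the neighbouring bits.
 */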
/*
 * Returns once the part of the register indicated by the mask has
 * reached the given value.
 */
int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
			 uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) == (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;

	return 0;
}
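/*
 * Typical use (sketch; 0x16A0 is a hypothetical register index): poll
 * until bit 0 of the register reads back as set, or the timeout expires:
 *
 *	if (phm_wait_on_register(hwmgr, 0x16A0, 0x1, 0x1))
 *		pr_err("register never acknowledged\n");
 *
 * The mask selects which bits participate in the comparison; all other
 * bits of the register are ignored.
 */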
/*
 * Returns once the part of the register indicated by the mask has
 * reached the given value. The indirect space is described by giving
 * the memory-mapped index of the indirect index register.
 */
int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, indirect_port, index);
	/* the data register sits immediately after the index register */
	return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t index,
				uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) != (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;

	return 0;
}
int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
					value, mask);
}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDPowerGating);
}
bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEPowerGating);
}
int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"Voltage Table empty.", return -EINVAL);

	table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
			GFP_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
	kfree(table);

	return 0;
}
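/*
 * Example: an input table of {800, 800, 850, 900, 850} collapses to
 * {800, 850, 900}; the smio_low of the first occurrence of each voltage
 * is kept. Note the helper preserves first-seen order rather than
 * sorting the result.
 */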
int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].mvdd;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim MVDD table.", return result);

	return 0;
}
int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim VDDCI table.", return result);

	return 0;
}
int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	int i = 0;

	PP_ASSERT_WITH_CODE((0 != lookup_table->count),
			"Voltage Lookup Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;

	vol_table->count = lookup_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}
void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
		struct pp_atomctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	/* drop the lowest entries so the highest voltage steps survive */
	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;
}
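/*
 * Example: with max_vol_steps = 4 and a six-entry table
 * {700, 750, 800, 850, 900, 950}, diff = 2 and the table becomes
 * {800, 850, 900, 950}: the two lowest steps are sacrificed so the
 * table fits the hardware state table.
 */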
int phm_reset_single_dpm_table(void *table,
		uint32_t count, int max)
{
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->count = count > max ? max : count;

	for (i = 0; i < dpm_table->count; i++)
		dpm_table->dpm_level[i].enabled = false;

	return 0;
}
void phm_setup_pcie_table_entry(
		void *table,
		uint32_t index, uint32_t pcie_gen,
		uint32_t pcie_lanes)
{
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->dpm_level[index].value = pcie_gen;
	dpm_table->dpm_level[index].param1 = pcie_lanes;
	dpm_table->dpm_level[index].enabled = true;
}
int32_t phm_get_dpm_level_enable_mask_value(void *table)
{
	int32_t i;
	int32_t mask = 0;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	/* walk from the highest level down, shifting each enable bit in */
	for (i = dpm_table->count; i > 0; i--) {
		mask = mask << 1;
		if (dpm_table->dpm_level[i - 1].enabled)
			mask |= 0x1;
		else
			mask &= 0xFFFFFFFE;
	}

	return mask;
}
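/*
 * Example: for a four-level table with levels 0, 1 and 3 enabled, the
 * loop produces mask = 0b1011, i.e. bit i is set iff dpm_level[i] is
 * enabled. The "mask &= 0xFFFFFFFE" branch is redundant after the
 * shift (bit 0 is already clear) but is kept for symmetry.
 */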
uint8_t phm_get_voltage_index(
		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
	uint8_t count;
	uint8_t i;

	PP_ASSERT_WITH_CODE((NULL != lookup_table),
			"Lookup Table empty.", return 0);

	count = (uint8_t)(lookup_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Lookup Table empty.", return 0);

	for (i = 0; i < count; i++) {
		/* find the first voltage equal to or bigger than requested */
		if (lookup_table->entries[i].us_vdd >= voltage)
			return i;
	}

	/* voltage is bigger than the max voltage in the table */
	return i - 1;
}
uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
		uint32_t voltage)
{
	uint8_t count;
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Table empty.", return 0);

	count = (uint8_t)(voltage_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Voltage Table empty.", return 0);

	for (i = 0; i < count; i++) {
		/* find the first voltage bigger than requested */
		if (voltage_table->entries[i].value >= voltage)
			return i;
	}

	/* voltage is bigger than the max voltage in the table */
	return i - 1;
}
uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
	uint32_t i;

	for (i = 0; i < vddci_table->count; i++) {
		if (vddci_table->entries[i].value >= vddci)
			return vddci_table->entries[i].value;
	}

	pr_debug("vddci is larger than max value in vddci_table\n");
	return vddci_table->entries[i - 1].value;
}
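/*
 * Example: with vddci_table = {800, 900, 1000}, a request of 850
 * returns 900 (the closest entry at or above the request), while 1100
 * falls off the end of the table and returns the 1000 mV ceiling.
 */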
int phm_find_boot_level(void *table,
		uint32_t value, uint32_t *boot_level)
{
	int result = -EINVAL;
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = 0; i < dpm_table->count; i++) {
		if (value == dpm_table->dpm_level[i].value) {
			*boot_level = i;
			result = 0;
			break;
		}
	}

	return result;
}
int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *sclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and the matching sclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	if (entry_id >= table_info->vdd_dep_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
		return -EINVAL;
	}

	*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;

	return 0;
}
/**
 * Initialize Dynamic State Adjustment Rule Settings
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 */
int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
{
	uint32_t table_size;
	struct phm_clock_voltage_dependency_table *table_clk_vlt;
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* initialize vddc_dep_on_dal_pwrl table */
	table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
	table_clk_vlt = kzalloc(table_size, GFP_KERNEL);

	if (table_clk_vlt == NULL) {
		pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 4;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
	table_clk_vlt->entries[1].v = 720;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
	table_clk_vlt->entries[2].v = 810;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
	table_clk_vlt->entries[3].v = 900;
	if (pptable_info != NULL)
		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	/* caller must pass a non-zero mask or this loop never terminates */
	while (0 == (mask & (1 << level)))
		level++;

	return level;
}
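/*
 * Example: a DPM enable mask of 0b11100 yields level 2, the index of
 * the lowest set bit; for a non-zero mask this is equivalent to
 * ffs(mask) - 1.
 */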
void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_clock_voltage_dependency_table *table =
			table_info->vddc_dep_on_dal_pwrl;
	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
	uint32_t req_vddc = 0, req_volt, i;

	if (!table || table->count <= 0
		|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
		|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
		return;

	for (i = 0; i < table->count; i++) {
		if (dal_power_level == table->entries[i].clk) {
			req_vddc = table->entries[i].v;
			break;
		}
	}

	vddc_table = table_info->vdd_dep_on_sclk;
	for (i = 0; i < vddc_table->count; i++) {
		if (req_vddc <= vddc_table->entries[i].vddc) {
			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_VddC_Request, req_volt);
			return;
		}
	}
	pr_err("DAL requested level cannot find an available voltage in the VDDC DPM table!\n");
}
int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint32_t sclk, uint16_t id, uint16_t *voltage)
{
	uint32_t vol;
	int ret = 0;

	if (hwmgr->chip_id < CHIP_TONGA) {
		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
		if (*voltage >= 2000 || *voltage == 0)
			*voltage = 1150;
	} else {
		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
		*voltage = (uint16_t)(vol / 100);
	}

	return ret;
}
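/*
 * Note on units (as the code implies, not separately documented): the
 * pre-Polaris paths report voltage directly in mV, with 1150 mV used as
 * a fallback when the VBIOS returns an implausible value, while the
 * newer _ai variant reports in 0.01 mV units, hence the divide by 100.
 */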
int phm_irq_process(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
		if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				adev->pdev->bus->number,
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				adev->pdev->bus->number,
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
			pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
				adev->pdev->bus->number,
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
	} else if (client_id == SOC15_IH_CLIENTID_THM) {
		if (src_id == 0)
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				adev->pdev->bus->number,
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		else
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				adev->pdev->bus->number,
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		/* note: the bus number lives in pdev->bus, not in devfn */
		pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
			adev->pdev->bus->number,
			PCI_SLOT(adev->pdev->devfn),
			PCI_FUNC(adev->pdev->devfn));
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
	.process = phm_irq_process,
};
int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu9_irq_funcs;

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_L2H,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_H2L,
			source);

	/* Register CTF (GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_ROM_SMUIO,
			SMUIO_9_0__SRCID__SMUIO_GPIO19,
			source);

	return 0;
}
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
			      uint8_t *frev, uint8_t *crev)
{
	struct amdgpu_device *adev = dev;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}
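/*
 * Usage sketch (hypothetical caller): fetch a VBIOS data table by its
 * index in the ATOM master data table and read back its revision:
 *
 *	uint16_t size;
 *	uint8_t frev, crev;
 *	void *tbl = smu_atom_get_data_table(hwmgr->adev, index,
 *					    &size, &frev, &crev);
 *	if (!tbl)
 *		return -EINVAL;
 *
 * `index` here is whatever table id the caller needs; NULL means the
 * header could not be parsed.
 */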
int smu_get_voltage_dependency_table_ppt_v1(
		const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
			"Voltage Lookup Table empty",
			return -EINVAL);

	dep_table->count = allowed_dep_table->count;
	for (i = 0; i < dep_table->count; i++) {
		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
	}

	return 0;
}
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	uint32_t i;
	struct watermarks *table = wt_table;

	if (!table || !wm_with_clock_ranges)
		return -EINVAL;

	if (wm_with_clock_ranges->num_wm_dmif_sets > 4 ||
	    wm_with_clock_ranges->num_wm_mcif_sets > 4)
		return -EINVAL;

	/* DMIF sets fill row 1; clocks arrive in kHz and are stored in MHz */
	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
		table->WatermarkRow[1][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].WmSetting = (uint8_t)
			wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
	}

	/* MCIF sets fill row 0 */
	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
		table->WatermarkRow[0][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].WmSetting = (uint8_t)
			wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
	}

	return 0;
}
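/*
 * Unit check (illustrative): a 600,000 kHz DCFCLK bound is stored as
 * cpu_to_le16(600), i.e. 600 MHz in the little-endian 16-bit field the
 * SMU firmware expects. Values above 65,535 MHz would truncate, but no
 * supported clock approaches that.
 */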