}
clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
return ret;
+ trace_clk_rate_request_done(&parent_req);
+
best = parent_req.rate;
} else if (parent) {
best = clk_core_get_rate_nolock(parent);
struct clk_rate_request parent_req;
clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
+ trace_clk_rate_request_done(&parent_req);
+
parent_rate = parent_req.rate;
} else {
parent_rate = clk_core_get_rate_nolock(parent);
{
struct clk_core *parent;
- if (WARN_ON(!core || !req))
+ if (WARN_ON(!req))
return;
memset(req, 0, sizeof(*req));
+ req->max_rate = ULONG_MAX;
+
+ if (!core)
+ return;
+
+ req->core = core;
req->rate = rate;
clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
struct clk_rate_request parent_req;
clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
ret = clk_core_round_rate_nolock(core->parent, &parent_req);
if (ret)
return ret;
+ trace_clk_rate_request_done(&parent_req);
+
req->best_parent_rate = parent_req.rate;
req->rate = parent_req.rate;
clk_core_init_rate_req(hw->core, &req, rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
return 0;
+ trace_clk_rate_request_done(&req);
+
return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);
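clk_hw_round_rate() is the provider-facing entry into the request path instrumented above. A minimal usage sketch, assuming a hypothetical fixed divide-by-two provider (foo_div2_round_rate and the x2 ratio are made up for illustration):

/* Hypothetical .round_rate callback for a fixed /2 divider. */
static long foo_div2_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	/* Ask the parent how close it can get to twice the requested rate. */
	*parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2);

	return *parent_rate / 2;
}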
clk_core_init_rate_req(clk->core, &req, rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_round_rate_nolock(clk->core, &req);
+ trace_clk_rate_request_done(&req);
+
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_core_init_rate_req(core, &req, rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
return NULL;
+ trace_clk_rate_request_done(&req);
+
best_parent_rate = req.best_parent_rate;
new_rate = req.rate;
parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
clk_core_init_rate_req(core, &req, req_rate);
+ trace_clk_rate_request_start(&req);
+
ret = clk_core_round_rate_nolock(core, &req);
+ trace_clk_rate_request_done(&req);
+
/* restore the protection */
clk_core_rate_restore_protect(core, cnt);
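The hunks above only add the callers; the tracepoints themselves live in include/trace/events/clk.h. A rough sketch of how such an event class could be declared, assuming it records the clock name and the request boundaries (field names here are illustrative, not copied from the patch):

/* Illustrative event class for clk_rate_request tracing. */
DECLARE_EVENT_CLASS(clk_rate_request,
	TP_PROTO(struct clk_rate_request *req),
	TP_ARGS(req),
	TP_STRUCT__entry(
		__string(name, req->core ? req->core->name : "none")
		__field(unsigned long, min)
		__field(unsigned long, max)
	),
	TP_fast_assign(
		__assign_str(name, req->core ? req->core->name : "none");
		__entry->min = req->min_rate;
		__entry->max = req->max_rate;
	),
	TP_printk("%s min %lu max %lu", __get_str(name), __entry->min, __entry->max)
);

/* _start fires before clk_core_round_rate_nolock(), _done after it succeeds. */
DEFINE_EVENT(clk_rate_request, clk_rate_request_start,
	TP_PROTO(struct clk_rate_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(clk_rate_request, clk_rate_request_done,
	TP_PROTO(struct clk_rate_request *req),
	TP_ARGS(req)
);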
FACTOR(CLK_TOP_IN_DGI_D4, "in_dgi_d4", "in_dgi", 1, 4),
FACTOR(CLK_TOP_IN_DGI_D6, "in_dgi_d6", "in_dgi", 1, 6),
FACTOR(CLK_TOP_IN_DGI_D8, "in_dgi_d8", "in_dgi", 1, 8),
- FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
- FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
- FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll_d4", 1, 2),
- FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll_d4", 1, 4),
- FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll_d4", 1, 8),
- FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
- FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2),
- FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4),
- FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll_d5", 1, 8),
- FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
- FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll_d6", 1, 2),
- FACTOR(CLK_TOP_MAINPLL_D6_D4, "mainpll_d6_d4", "mainpll_d6", 1, 4),
- FACTOR(CLK_TOP_MAINPLL_D6_D8, "mainpll_d6_d8", "mainpll_d6", 1, 8),
- FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
- FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2),
- FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4),
- FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll_d7", 1, 8),
- FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
- FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
- FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
- FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
- FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll_d4", 1, 2),
- FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll_d4", 1, 4),
- FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll_d4", 1, 8),
- FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
- FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2),
- FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4),
- FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, 8),
- FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
- FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll_d6", 1, 2),
- FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll_d6", 1, 4),
- FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll_d6", 1, 8),
- FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll_d6", 1, 16),
- FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
- FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
- FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4),
- FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8),
- FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16),
- FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll_d4", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll_d4", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll_d4", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll_d5", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll_d6", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D6_D4, "mainpll_d6_d4", "mainpll_d6", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D6_D8, "mainpll_d6_d8", "mainpll_d6", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll_d7", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll_d4", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll_d4", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll_d4", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll_d6", 1, 2, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll_d6", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll_d6", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll_d6", 1, 16, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16, 0),
+ FACTOR_FLAGS(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32, 0),
FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1", 1, 3),
FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4),
FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2", 1, 3),
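For context on the conversion above: FACTOR() bakes in a flag set, while FACTOR_FLAGS() exposes it, so passing 0 drops CLK_SET_RATE_PARENT from these mainpll/univpll dividers and keeps rate requests from being forwarded to the PLLs. A sketch of the two macros as commonly defined in drivers/clk/mediatek/clk-mtk.h (paraphrased, not part of this diff):

#define FACTOR_FLAGS(_id, _name, _parent, _mult, _div, _fl) {	\
		.id = _id,					\
		.name = _name,					\
		.parent_name = _parent,				\
		.mult = _mult,					\
		.div = _div,					\
		.flags = _fl,					\
	}

/* FACTOR() keeps the historical default of following the parent rate. */
#define FACTOR(_id, _name, _parent, _mult, _div)		\
	FACTOR_FLAGS(_id, _name, _parent, _mult, _div, CLK_SET_RATE_PARENT)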
hw = devm_clk_hw_register_mux(&pdev->dev, "mfg_ck_fast_ref", mfg_fast_parents,
ARRAY_SIZE(mfg_fast_parents), CLK_SET_RATE_PARENT,
(base + 0x250), 8, 1, 0, &mt8195_clk_lock);
- if (IS_ERR(hw))
+ if (IS_ERR(hw)) {
+ r = PTR_ERR(hw);
goto unregister_muxes;
+ }
top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF] = hw;
r = clk_mt8195_reg_mfg_mux_notifier(&pdev->dev,
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#define RETAIN_MEM BIT(14)
#define RETAIN_PERIPH BIT(13)
+#define STATUS_POLL_TIMEOUT_US 1500
#define TIMEOUT_US 500
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
GDSC_ON
};
-static int gdsc_pm_runtime_get(struct gdsc *sc)
-{
- if (!sc->dev)
- return 0;
-
- return pm_runtime_resume_and_get(sc->dev);
-}
-
-static int gdsc_pm_runtime_put(struct gdsc *sc)
-{
- if (!sc->dev)
- return 0;
-
- return pm_runtime_put_sync(sc->dev);
-}
-
/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
do {
if (gdsc_check_status(sc, status))
return 0;
- } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);
+ } while (ktime_us_delta(ktime_get(), start) < STATUS_POLL_TIMEOUT_US);
if (gdsc_check_status(sc, status))
return 0;
regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}
-static int _gdsc_enable(struct gdsc *sc)
+static int gdsc_enable(struct generic_pm_domain *domain)
{
+ struct gdsc *sc = domain_to_gdsc(domain);
int ret;
if (sc->pwrsts == PWRSTS_ON)
return 0;
}
-static int gdsc_enable(struct generic_pm_domain *domain)
+static int gdsc_disable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
int ret;
- ret = gdsc_pm_runtime_get(sc);
- if (ret)
- return ret;
-
- return _gdsc_enable(sc);
-}
-
-static int _gdsc_disable(struct gdsc *sc)
-{
- int ret;
-
if (sc->pwrsts == PWRSTS_ON)
return gdsc_assert_reset(sc);
return 0;
}
-static int gdsc_disable(struct generic_pm_domain *domain)
-{
- struct gdsc *sc = domain_to_gdsc(domain);
- int ret;
-
- ret = _gdsc_disable(sc);
-
- gdsc_pm_runtime_put(sc);
-
- return ret;
-}
-
static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
return ret;
}
- /* ...and the power-domain */
- ret = gdsc_pm_runtime_get(sc);
- if (ret)
- goto err_disable_supply;
-
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
if (sc->flags & VOTABLE) {
ret = gdsc_update_collapse_bit(sc, false);
if (ret)
- goto err_put_rpm;
+ goto err_disable_supply;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
- goto err_put_rpm;
+ goto err_disable_supply;
}
/*
ret = pm_genpd_init(&sc->pd, NULL, !on);
if (ret)
- goto err_put_rpm;
+ goto err_disable_supply;
return 0;
-err_put_rpm:
- if (on)
- gdsc_pm_runtime_put(sc);
err_disable_supply:
if (on && sc->rsupply)
regulator_disable(sc->rsupply);
for (i = 0; i < num; i++) {
if (!scs[i])
continue;
- if (pm_runtime_enabled(dev))
- scs[i]->dev = dev;
scs[i]->regmap = regmap;
scs[i]->rcdev = rcdev;
ret = gdsc_init(scs[i]);