// SPDX-License-Identifier: GPL-2.0+

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "clk.h"

#define TIMEOUT_US	500U

#define CCM_DIV_SHIFT	0
#define CCM_DIV_WIDTH	8
#define CCM_MUX_SHIFT	8
#define CCM_MUX_MASK	3
#define CCM_OFF_SHIFT	24
#define CCM_BUSY_SHIFT	28

#define STAT_OFFSET	0x4
#define AUTHEN_OFFSET	0x30
#define TZ_NS_MASK	BIT(9)

#define WHITE_LIST_SHIFT	16
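
/*
 * Layout of an i.MX93 CCM clock root slice, as encoded by the #defines
 * above: the divider occupies bits 7:0 and the mux select bits 9:8 of the
 * control register at the slice base, the OFF (gate) bit is bit 24, and a
 * BUSY flag sits in bit 28 of the STAT register at base + 0x4.  BUSY stays
 * set while a mux/div/gate change is still propagating, so every setter
 * below polls it after writing.  The _atomic poll variant is needed here
 * because the callers hold the CCM spinlock with interrupts disabled.
 */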
static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
					0, TIMEOUT_US);
	if (ret)
		pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));

	return ret;
}
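
/*
 * The OFF bit gates the clock when *set*, i.e. the gate has inverted
 * (set-to-disable) polarity: enabling the clock clears the bit, disabling
 * sets it.  The register update and the following busy-wait happen under
 * the shared CCM spinlock so concurrent mux/div/gate writers cannot
 * interleave on the same slice.
 */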
static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(gate->lock, flags);

	reg = readl(gate->reg);

	if (enable)
		reg &= ~BIT(gate->bit_idx);
	else
		reg |= BIT(gate->bit_idx);

	writel(reg, gate->reg);

	imx93_clk_composite_wait_ready(hw, gate->reg);

	spin_unlock_irqrestore(gate->lock, flags);
}
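
/*
 * Thin wrappers adapting endisable() to the clk_ops prototypes: .enable
 * must return a status while .disable returns void.
 */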
static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
{
	imx93_clk_composite_gate_endisable(hw, 1);

	return 0;
}

static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
	imx93_clk_composite_gate_endisable(hw, 0);
}

static const struct clk_ops imx93_clk_composite_gate_ops = {
	.enable = imx93_clk_composite_gate_enable,
	.disable = imx93_clk_composite_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
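
/*
 * Rate queries are delegated verbatim to the generic clk_divider_ops;
 * only set_rate is overridden below, to add the slice busy handshake
 * after the divider field is written.
 */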
static unsigned long
imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static long
imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}

static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	return clk_divider_ops.determine_rate(hw, req);
}
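
/*
 * With no divider table and only CLK_DIVIDER_ROUND_CLOSEST set, the
 * generic divider's default encoding applies: a field value of N divides
 * the parent rate by N + 1, so the 8-bit field covers /1 to /256 and
 * divider_get_val() picks the closest achievable rate.
 */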
static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

	val = readl(divider->reg);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = imx93_clk_composite_wait_ready(hw, divider->reg);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static const struct clk_ops imx93_clk_composite_divider_ops = {
	.recalc_rate = imx93_clk_composite_divider_recalc_rate,
	.round_rate = imx93_clk_composite_divider_round_rate,
	.determine_rate = imx93_clk_composite_divider_determine_rate,
	.set_rate = imx93_clk_composite_divider_set_rate,
};
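
/*
 * As with the divider, parent queries fall through to the generic
 * clk_mux_ops; set_parent is overridden so the mux write can be followed
 * by the slice busy handshake.
 */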
static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}

static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;
	int ret;

	spin_lock_irqsave(mux->lock, flags);

	reg = readl(mux->reg);
	reg &= ~(mux->mask << mux->shift);
	val = val << mux->shift;
	reg |= val;
	writel(reg, mux->reg);

	ret = imx93_clk_composite_wait_ready(hw, mux->reg);

	spin_unlock_irqrestore(mux->lock, flags);

	return ret;
}

static int
imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	return clk_mux_ops.determine_rate(hw, req);
}

static const struct clk_ops imx93_clk_composite_mux_ops = {
	.get_parent = imx93_clk_composite_mux_get_parent,
	.set_parent = imx93_clk_composite_mux_set_parent,
	.determine_rate = imx93_clk_composite_mux_determine_rate,
};
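
/**
 * imx93_clk_composite_flags - register a CCM clock root slice as a
 *                             composite mux/divider/gate clock
 * @name: clock name
 * @parent_names: names of the selectable parent clocks
 * @num_parents: number of entries in @parent_names
 * @reg: base of the slice's registers
 * @domain_id: bus master domain ID checked against the slice white-list
 * @flags: common clock framework flags
 *
 * Return: the registered clk_hw on success, an ERR_PTR() on failure.
 */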
struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
					 int num_parents, void __iomem *reg, u32 domain_id,
					 unsigned long flags)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
	struct clk_hw *div_hw, *gate_hw;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux = NULL;
	bool clk_ro = false;
	u32 authen;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto fail;

	mux_hw = &mux->hw;
	mux->reg = reg;
	mux->shift = CCM_MUX_SHIFT;
	mux->mask = CCM_MUX_MASK;
	mux->lock = &imx_ccm_lock;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto fail;

	div_hw = &div->hw;
	div->reg = reg;
	div->shift = CCM_DIV_SHIFT;
	div->width = CCM_DIV_WIDTH;
	div->lock = &imx_ccm_lock;
	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

	authen = readl(reg + AUTHEN_OFFSET);
	if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
		clk_ro = true;
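
	/*
	 * A slice that is not non-secure accessible (TZ_NS clear) or whose
	 * white-list does not include this bus master domain must never be
	 * written from Linux, so register it with read-only mux and divider
	 * ops.
	 */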
	if (clk_ro) {
		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
					       mux_hw, &clk_mux_ro_ops, div_hw,
					       &clk_divider_ro_ops, NULL, NULL, flags);
	} else if (!mcore_booted) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto fail;

		gate_hw = &gate->hw;
		gate->reg = reg;
		gate->bit_idx = CCM_OFF_SHIFT;
		gate->lock = &imx_ccm_lock;
		gate->flags = CLK_GATE_SET_TO_DISABLE;

		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
					       mux_hw, &imx93_clk_composite_mux_ops, div_hw,
					       &imx93_clk_composite_divider_ops, gate_hw,
					       &imx93_clk_composite_gate_ops,
					       flags | CLK_SET_RATE_NO_REPARENT);
	} else {
		/*
		 * With the Cortex-M core already booted it may depend on
		 * this slice, so register without a gate hw to keep the
		 * clock from being turned off behind its back.
		 */
		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
					       mux_hw, &imx93_clk_composite_mux_ops, div_hw,
					       &imx93_clk_composite_divider_ops, NULL,
					       &imx93_clk_composite_gate_ops,
					       flags | CLK_SET_RATE_NO_REPARENT);
	}

	if (IS_ERR(hw))
		goto fail;

	return hw;

fail:
	kfree(gate);
	kfree(div);
	kfree(mux);
	return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(imx93_clk_composite_flags);
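
/*
 * Example use from an SoC clock provider.  This is an illustrative sketch
 * only: the clock name, parent list, register offset and domain ID are
 * hypothetical, not taken from a real i.MX93 clock tree.
 *
 *	static const char * const foo_sels[] = { "osc_24m", "pll_a", };
 *
 *	hws[MY_CLK_FOO] = imx93_clk_composite_flags("foo", foo_sels,
 *						    ARRAY_SIZE(foo_sels),
 *						    ccm_base + 0x0a80, 3, 0);
 */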