drivers/clk/imx/clk-composite-93.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2021 NXP
 *
 * Peng Fan <[email protected]>
 */

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "clk.h"

#define TIMEOUT_US      500U

#define CCM_DIV_SHIFT   0
#define CCM_DIV_WIDTH   8
#define CCM_MUX_SHIFT   8
#define CCM_MUX_MASK    3
#define CCM_OFF_SHIFT   24
#define CCM_BUSY_SHIFT  28

#define STAT_OFFSET     0x4
#define AUTHEN_OFFSET   0x30
#define TZ_NS_SHIFT     9
#define TZ_NS_MASK      BIT(9)

#define WHITE_LIST_SHIFT        16

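/*
 * Each CCM clock slice has a STAT register whose BUSY bit stays set while
 * the hardware is still applying a new mux, divider or gate setting. Poll
 * it until the slice has settled, or report a timeout.
 */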
static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
{
        int ret;
        u32 val;

        ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
                                        0, TIMEOUT_US);
        if (ret)
                pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));

        return ret;
}

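/*
 * The slice gate bit (CCM_OFF_SHIFT) is "set to disable": clearing it
 * ungates the clock, setting it gates the clock (hence the
 * CLK_GATE_SET_TO_DISABLE flag below). After touching the bit, wait for
 * the slice to leave its busy state.
 */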
static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
{
        struct clk_gate *gate = to_clk_gate(hw);
        unsigned long flags;
        u32 reg;

        if (gate->lock)
                spin_lock_irqsave(gate->lock, flags);

        reg = readl(gate->reg);

        if (enable)
                reg &= ~BIT(gate->bit_idx);
        else
                reg |= BIT(gate->bit_idx);

        writel(reg, gate->reg);

        imx93_clk_composite_wait_ready(hw, gate->reg);

        if (gate->lock)
                spin_unlock_irqrestore(gate->lock, flags);
}

static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
{
        imx93_clk_composite_gate_endisable(hw, 1);

        return 0;
}

static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
        imx93_clk_composite_gate_endisable(hw, 0);
}

static const struct clk_ops imx93_clk_composite_gate_ops = {
        .enable = imx93_clk_composite_gate_enable,
        .disable = imx93_clk_composite_gate_disable,
        .is_enabled = clk_gate_is_enabled,
};

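/*
 * The divider ops mostly wrap the generic clk_divider ops; only set_rate
 * differs, because the slice must be polled for completion after the
 * divider field is written.
 */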
static unsigned long
imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static long
imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
{
        return clk_divider_ops.round_rate(hw, rate, prate);
}

static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
        return clk_divider_ops.determine_rate(hw, req);
}

static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
                                                unsigned long parent_rate)
{
        struct clk_divider *divider = to_clk_divider(hw);
        int value;
        unsigned long flags = 0;
        u32 val;
        int ret;

        value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
        if (value < 0)
                return value;

        if (divider->lock)
                spin_lock_irqsave(divider->lock, flags);

        val = readl(divider->reg);
        val &= ~(clk_div_mask(divider->width) << divider->shift);
        val |= (u32)value << divider->shift;
        writel(val, divider->reg);

        ret = imx93_clk_composite_wait_ready(hw, divider->reg);

        if (divider->lock)
                spin_unlock_irqrestore(divider->lock, flags);

        return ret;
}

static const struct clk_ops imx93_clk_composite_divider_ops = {
        .recalc_rate = imx93_clk_composite_divider_recalc_rate,
        .round_rate = imx93_clk_composite_divider_round_rate,
        .determine_rate = imx93_clk_composite_divider_determine_rate,
        .set_rate = imx93_clk_composite_divider_set_rate,
};

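/*
 * Likewise for the mux: get_parent and determine_rate come from the
 * generic clk_mux ops, while set_parent writes the mux field and then
 * waits for the slice busy bit to clear.
 */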
static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
{
        return clk_mux_ops.get_parent(hw);
}

static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_mux *mux = to_clk_mux(hw);
        u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
        unsigned long flags = 0;
        u32 reg;
        int ret;

        if (mux->lock)
                spin_lock_irqsave(mux->lock, flags);

        reg = readl(mux->reg);
        reg &= ~(mux->mask << mux->shift);
        val = val << mux->shift;
        reg |= val;
        writel(reg, mux->reg);

        ret = imx93_clk_composite_wait_ready(hw, mux->reg);

        if (mux->lock)
                spin_unlock_irqrestore(mux->lock, flags);

        return ret;
}

static int
imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
        return clk_mux_ops.determine_rate(hw, req);
}

static const struct clk_ops imx93_clk_composite_mux_ops = {
        .get_parent = imx93_clk_composite_mux_get_parent,
        .set_parent = imx93_clk_composite_mux_set_parent,
        .determine_rate = imx93_clk_composite_mux_determine_rate,
};

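/*
 * Register a composite mux/divider/gate clock backed by a single i.MX93
 * CCM clock-slice register block. The slice's AUTHEN register decides how
 * much control this domain (domain_id) is given; the returned clk_hw is an
 * ERR_PTR on failure.
 */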
struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
                                         int num_parents, void __iomem *reg, u32 domain_id,
                                         unsigned long flags)
{
        struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
        struct clk_hw *div_hw, *gate_hw;
        struct clk_divider *div = NULL;
        struct clk_gate *gate = NULL;
        struct clk_mux *mux = NULL;
        bool clk_ro = false;
        u32 authen;

        mux = kzalloc(sizeof(*mux), GFP_KERNEL);
        if (!mux)
                goto fail;

        mux_hw = &mux->hw;
        mux->reg = reg;
        mux->shift = CCM_MUX_SHIFT;
        mux->mask = CCM_MUX_MASK;
        mux->lock = &imx_ccm_lock;

        div = kzalloc(sizeof(*div), GFP_KERNEL);
        if (!div)
                goto fail;

        div_hw = &div->hw;
        div->reg = reg;
        div->shift = CCM_DIV_SHIFT;
        div->width = CCM_DIV_WIDTH;
        div->lock = &imx_ccm_lock;
        div->flags = CLK_DIVIDER_ROUND_CLOSEST;

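        /*
         * A slice is treated as read-only when its AUTHEN register either
         * does not grant non-secure access (TZ_NS) or does not whitelist
         * this domain; in that case only the read-only mux/divider ops are
         * registered and no gate is created.
         */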
        authen = readl(reg + AUTHEN_OFFSET);
        if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
                clk_ro = true;

        if (clk_ro) {
                hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
                                               mux_hw, &clk_mux_ro_ops, div_hw,
                                               &clk_divider_ro_ops, NULL, NULL, flags);
        } else if (!mcore_booted) {
                gate = kzalloc(sizeof(*gate), GFP_KERNEL);
                if (!gate)
                        goto fail;

                gate_hw = &gate->hw;
                gate->reg = reg;
                gate->bit_idx = CCM_OFF_SHIFT;
                gate->lock = &imx_ccm_lock;
                gate->flags = CLK_GATE_SET_TO_DISABLE;

                hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
                                               mux_hw, &imx93_clk_composite_mux_ops, div_hw,
                                               &imx93_clk_composite_divider_ops, gate_hw,
                                               &imx93_clk_composite_gate_ops,
                                               flags | CLK_SET_RATE_NO_REPARENT);
        } else {
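                /*
                 * The Cortex-M core has already booted and may rely on this
                 * clock: register without a gate so Linux never turns it off.
                 */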
                hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
                                               mux_hw, &imx93_clk_composite_mux_ops, div_hw,
                                               &imx93_clk_composite_divider_ops, NULL,
                                               &imx93_clk_composite_gate_ops,
                                               flags | CLK_SET_RATE_NO_REPARENT);
        }

        if (IS_ERR(hw))
                goto fail;

        return hw;

fail:
        kfree(gate);
        kfree(div);
        kfree(mux);
        return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(imx93_clk_composite_flags);