// SPDX-License-Identifier: GPL-2.0-only
/*
 * The On Chip Memory (OCMEM) allocator allows various clients to allocate
 * memory from OCMEM based on performance, latency and power requirements.
 * This is typically used by the GPU, camera/video, and audio components on
 * some Snapdragon SoCs.
 *
 * Copyright (C) 2019 Brian Masney <[email protected]>
 * Copyright (C) 2015 Red Hat. Author: Rob Clark <[email protected]>
 */

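/*
 * Illustrative client-side usage (a hedged sketch, not part of this driver):
 * a client such as the GPU driver looks the OCMEM device up through its
 * "sram" phandle and then allocates and releases a buffer. Error handling is
 * abbreviated and the local variable names are hypothetical.
 *
 *	struct ocmem *ocmem = of_get_ocmem(dev);
 *	struct ocmem_buf *buf;
 *
 *	if (IS_ERR(ocmem))
 *		return PTR_ERR(ocmem);
 *
 *	buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, SZ_256K);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *
 *	... program buf->addr and buf->len into the client hardware ...
 *
 *	ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
 */
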
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <soc/qcom/ocmem.h>

enum region_mode {
        WIDE_MODE = 0x0,
        THIN_MODE,
        MODE_DEFAULT = WIDE_MODE,
};

enum ocmem_macro_state {
        PASSTHROUGH = 0,
        PERI_ON = 1,
        CORE_ON = 2,
        CLK_OFF = 4,
};

struct ocmem_region {
        bool interleaved;
        enum region_mode mode;
        unsigned int num_macros;
        enum ocmem_macro_state macro_state[4];
        unsigned long macro_size;
        unsigned long region_size;
};

struct ocmem_config {
        uint8_t num_regions;
        unsigned long macro_size;
};

struct ocmem {
        struct device *dev;
        const struct ocmem_config *config;
        struct resource *memory;
        void __iomem *mmio;
        struct clk *core_clk;
        struct clk *iface_clk;
        unsigned int num_ports;
        unsigned int num_macros;
        bool interleaved;
        struct ocmem_region *regions;
        unsigned long active_allocations;
};

#define OCMEM_MIN_ALIGN                         SZ_64K
#define OCMEM_MIN_ALLOC                         SZ_64K

#define OCMEM_REG_HW_VERSION                    0x00000000
#define OCMEM_REG_HW_PROFILE                    0x00000004

#define OCMEM_REG_REGION_MODE_CTL               0x00001000
#define OCMEM_REGION_MODE_CTL_REG0_THIN         0x00000001
#define OCMEM_REGION_MODE_CTL_REG1_THIN         0x00000002
#define OCMEM_REGION_MODE_CTL_REG2_THIN         0x00000004
#define OCMEM_REGION_MODE_CTL_REG3_THIN         0x00000008

#define OCMEM_REG_GFX_MPU_START                 0x00001004
#define OCMEM_REG_GFX_MPU_END                   0x00001008

#define OCMEM_HW_VERSION_MAJOR(val)             FIELD_GET(GENMASK(31, 28), val)
#define OCMEM_HW_VERSION_MINOR(val)             FIELD_GET(GENMASK(27, 16), val)
#define OCMEM_HW_VERSION_STEP(val)              FIELD_GET(GENMASK(15, 0), val)

#define OCMEM_HW_PROFILE_NUM_PORTS(val)         FIELD_GET(0x0000000f, (val))
#define OCMEM_HW_PROFILE_NUM_MACROS(val)        FIELD_GET(0x00003f00, (val))

#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE     0x00010000
#define OCMEM_HW_PROFILE_INTERLEAVING           0x00020000
#define OCMEM_REG_GEN_STATUS                    0x0000000c

#define OCMEM_REG_PSGSC_STATUS                  0x00000038
#define OCMEM_REG_PSGSC_CTL(i0)                 (0x0000003c + 0x1*(i0))

#define OCMEM_PSGSC_CTL_MACRO0_MODE(val)        FIELD_PREP(0x00000007, (val))
#define OCMEM_PSGSC_CTL_MACRO1_MODE(val)        FIELD_PREP(0x00000070, (val))
#define OCMEM_PSGSC_CTL_MACRO2_MODE(val)        FIELD_PREP(0x00000700, (val))
#define OCMEM_PSGSC_CTL_MACRO3_MODE(val)        FIELD_PREP(0x00007000, (val))
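
/*
 * Each OCMEM_REG_PSGSC_CTL(i) register packs the power state of a region's
 * four macros into 3-bit fields. As an illustrative example (a sketch, the
 * values follow directly from the FIELD_PREP masks above): putting every
 * macro into CORE_ON (2) produces a register value of 0x00002222, while
 * CLK_OFF (4) in all four fields produces 0x00004444.
 */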

static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data)
{
        writel(data, ocmem->mmio + reg);
}

static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg)
{
        return readl(ocmem->mmio + reg);
}

static void update_ocmem(struct ocmem *ocmem)
{
        uint32_t region_mode_ctrl = 0x0;
        int i;

        if (!qcom_scm_ocmem_lock_available()) {
                for (i = 0; i < ocmem->config->num_regions; i++) {
                        struct ocmem_region *region = &ocmem->regions[i];

                        if (region->mode == THIN_MODE)
                                region_mode_ctrl |= BIT(i);
                }

                dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n",
                        region_mode_ctrl);
                ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl);
        }

        for (i = 0; i < ocmem->config->num_regions; i++) {
                struct ocmem_region *region = &ocmem->regions[i];
                u32 data;

                data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) |
                        OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) |
                        OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) |
                        OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]);

                ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data);
        }
}

static unsigned long phys_to_offset(struct ocmem *ocmem,
                                    unsigned long addr)
{
        if (addr < ocmem->memory->start || addr >= ocmem->memory->end)
                return 0;

        return addr - ocmem->memory->start;
}

static unsigned long device_address(struct ocmem *ocmem,
                                    enum ocmem_client client,
                                    unsigned long addr)
{
        WARN_ON(client != OCMEM_GRAPHICS);

        /* TODO: gpu uses phys_to_offset, but others do not.. */
        return phys_to_offset(ocmem, addr);
}

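/*
 * Walk every macro of every region and, for the macros that fall inside
 * [buf->offset, buf->offset + buf->len), apply the requested macro state and
 * region mode before pushing the result to the hardware.
 *
 * Hedged worked example (hypothetical numbers): if each region has four 128K
 * macros, a buffer with offset 0 and len SZ_256K marks only macros 0 and 1 of
 * the first region, because the running offset reaches 256K before macro 2 of
 * that region is examined.
 */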
static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
                         enum ocmem_macro_state mstate, enum region_mode rmode)
{
        unsigned long offset = 0;
        int i, j;

        for (i = 0; i < ocmem->config->num_regions; i++) {
                struct ocmem_region *region = &ocmem->regions[i];

                if (buf->offset <= offset && offset < buf->offset + buf->len)
                        region->mode = rmode;

                for (j = 0; j < region->num_macros; j++) {
                        if (buf->offset <= offset &&
                            offset < buf->offset + buf->len)
                                region->macro_state[j] = mstate;

                        offset += region->macro_size;
                }
        }

        update_ocmem(ocmem);
}

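/*
 * Hedged devicetree sketch showing how a client is expected to reference a
 * child node of the OCMEM device through a "sram" phandle; the node names,
 * labels and addresses below are hypothetical, not taken from a real dtsi:
 *
 *	ocmem: sram@fdd00000 {
 *		compatible = "qcom,msm8974-ocmem";
 *		...
 *		gmu_sram: gmu-sram@0 {
 *			...
 *		};
 *	};
 *
 *	gpu@fdb00000 {
 *		...
 *		sram = <&gmu_sram>;
 *	};
 *
 * of_get_ocmem() resolves the phandle, walks up to the parent OCMEM device
 * and returns its driver data.
 */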
struct ocmem *of_get_ocmem(struct device *dev)
{
        struct platform_device *pdev;
        struct ocmem *ocmem;

        struct device_node *devnode __free(device_node) = of_parse_phandle(dev->of_node,
                                                                           "sram", 0);
        if (!devnode || !devnode->parent) {
                dev_err(dev, "Cannot look up sram phandle\n");
                return ERR_PTR(-ENODEV);
        }

        pdev = of_find_device_by_node(devnode->parent);
        if (!pdev) {
                dev_err(dev, "Cannot find device node %s\n", devnode->name);
                return ERR_PTR(-EPROBE_DEFER);
        }

        ocmem = platform_get_drvdata(pdev);
        if (!ocmem) {
                dev_err(dev, "Cannot get ocmem\n");
                put_device(&pdev->dev);
                return ERR_PTR(-ENODEV);
        }
        return ocmem;
}
EXPORT_SYMBOL_GPL(of_get_ocmem);

struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
                                 unsigned long size)
{
        int ret;

        /* TODO: add support for other clients... */
        if (WARN_ON(client != OCMEM_GRAPHICS))
                return ERR_PTR(-ENODEV);

        if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN))
                return ERR_PTR(-EINVAL);

        if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
                return ERR_PTR(-EBUSY);

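        /*
         * Scope-based cleanup (linux/cleanup.h): if this function is left
         * through an error path, the __free(kfree) annotation frees buf
         * automatically; return_ptr() below hands ownership to the caller.
         */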
        struct ocmem_buf *buf __free(kfree) = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        buf->offset = 0;
        buf->addr = device_address(ocmem, client, buf->offset);
        buf->len = size;

        update_range(ocmem, buf, CORE_ON, WIDE_MODE);

        if (qcom_scm_ocmem_lock_available()) {
                ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
                                          buf->offset, buf->len, WIDE_MODE);
                if (ret) {
                        dev_err(ocmem->dev, "could not lock: %d\n", ret);
                        ret = -EINVAL;
                        goto err_unlock;
                }
        } else {
                ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
                ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END,
                            buf->offset + buf->len);
        }

        dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
                size / 1024, buf->addr, client);

        return_ptr(buf);

err_unlock:
        clear_bit_unlock(BIT(client), &ocmem->active_allocations);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ocmem_allocate);

void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
                struct ocmem_buf *buf)
{
        /* TODO: add support for other clients... */
        if (WARN_ON(client != OCMEM_GRAPHICS))
                return;

        update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT);

        if (qcom_scm_ocmem_lock_available()) {
                int ret;

                ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID,
                                            buf->offset, buf->len);
                if (ret)
                        dev_err(ocmem->dev, "could not unlock: %d\n", ret);
        } else {
                ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0);
                ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0);
        }

        kfree(buf);

        clear_bit_unlock(BIT(client), &ocmem->active_allocations);
}
EXPORT_SYMBOL_GPL(ocmem_free);

static int ocmem_dev_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        unsigned long reg, region_size;
        int i, j, ret, num_banks;
        struct ocmem *ocmem;

        if (!qcom_scm_is_available())
                return -EPROBE_DEFER;

        ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL);
        if (!ocmem)
                return -ENOMEM;

        ocmem->dev = dev;
        ocmem->config = device_get_match_data(dev);

        ocmem->core_clk = devm_clk_get(dev, "core");
        if (IS_ERR(ocmem->core_clk))
                return dev_err_probe(dev, PTR_ERR(ocmem->core_clk),
                                     "Unable to get core clock\n");

        ocmem->iface_clk = devm_clk_get_optional(dev, "iface");
        if (IS_ERR(ocmem->iface_clk))
                return dev_err_probe(dev, PTR_ERR(ocmem->iface_clk),
                                     "Unable to get iface clock\n");

        ocmem->mmio = devm_platform_ioremap_resource_byname(pdev, "ctrl");
        if (IS_ERR(ocmem->mmio))
                return dev_err_probe(&pdev->dev, PTR_ERR(ocmem->mmio),
                                     "Failed to ioremap ocmem_ctrl resource\n");

        ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                     "mem");
        if (!ocmem->memory) {
                dev_err(dev, "Could not get mem region\n");
                return -ENXIO;
        }

        /* The core clock is synchronous with graphics */
        WARN_ON(clk_set_rate(ocmem->core_clk, 1000) < 0);

        ret = clk_prepare_enable(ocmem->core_clk);
        if (ret)
                return dev_err_probe(ocmem->dev, ret, "Failed to enable core clock\n");

        ret = clk_prepare_enable(ocmem->iface_clk);
        if (ret) {
                clk_disable_unprepare(ocmem->core_clk);
                return dev_err_probe(ocmem->dev, ret, "Failed to enable iface clock\n");
        }

        if (qcom_scm_restore_sec_cfg_available()) {
                dev_dbg(dev, "configuring scm\n");
                ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0);
                if (ret) {
                        dev_err_probe(dev, ret, "Could not enable secure configuration\n");
                        goto err_clk_disable;
                }
        }

        reg = ocmem_read(ocmem, OCMEM_REG_HW_VERSION);
        dev_dbg(dev, "OCMEM hardware version: %lu.%lu.%lu\n",
                OCMEM_HW_VERSION_MAJOR(reg),
                OCMEM_HW_VERSION_MINOR(reg),
                OCMEM_HW_VERSION_STEP(reg));

        reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE);
        ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg);
        ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg);
        ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING);

        num_banks = ocmem->num_ports / 2;
        region_size = ocmem->config->macro_size * num_banks;

        dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n",
                 ocmem->num_ports, ocmem->config->num_regions,
                 ocmem->num_macros, ocmem->interleaved ? "" : "not ");

        ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions,
                                      sizeof(struct ocmem_region), GFP_KERNEL);
        if (!ocmem->regions) {
                ret = -ENOMEM;
                goto err_clk_disable;
        }

        for (i = 0; i < ocmem->config->num_regions; i++) {
                struct ocmem_region *region = &ocmem->regions[i];

                if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) {
                        ret = -EINVAL;
                        goto err_clk_disable;
                }

                region->mode = MODE_DEFAULT;
                region->num_macros = num_banks;

                if (i == (ocmem->config->num_regions - 1) &&
                    reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) {
                        region->macro_size = ocmem->config->macro_size / 2;
                        region->region_size = region_size / 2;
                } else {
                        region->macro_size = ocmem->config->macro_size;
                        region->region_size = region_size;
                }

                for (j = 0; j < ARRAY_SIZE(region->macro_state); j++)
                        region->macro_state[j] = CLK_OFF;
        }

        platform_set_drvdata(pdev, ocmem);

        return 0;

err_clk_disable:
        clk_disable_unprepare(ocmem->core_clk);
        clk_disable_unprepare(ocmem->iface_clk);
        return ret;
}

static void ocmem_dev_remove(struct platform_device *pdev)
{
        struct ocmem *ocmem = platform_get_drvdata(pdev);

        clk_disable_unprepare(ocmem->core_clk);
        clk_disable_unprepare(ocmem->iface_clk);
}

static const struct ocmem_config ocmem_8226_config = {
        .num_regions = 1,
        .macro_size = SZ_128K,
};

static const struct ocmem_config ocmem_8974_config = {
        .num_regions = 3,
        .macro_size = SZ_128K,
};

static const struct of_device_id ocmem_of_match[] = {
        { .compatible = "qcom,msm8226-ocmem", .data = &ocmem_8226_config },
        { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config },
        { }
};

MODULE_DEVICE_TABLE(of, ocmem_of_match);

static struct platform_driver ocmem_driver = {
        .probe = ocmem_dev_probe,
        .remove = ocmem_dev_remove,
        .driver = {
                .name = "ocmem",
                .of_match_table = ocmem_of_match,
        },
};

module_platform_driver(ocmem_driver);

MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs");
MODULE_LICENSE("GPL v2");