drivers/net/wireless/ath/wil6210/pmc.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

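/* Typical usage flow (these entry points are driven from debugfs):
 *   wil_pmc_alloc() - allocate the p-ring and data buffers and send
 *                     WMI_PMC_ALLOCATE so firmware starts using them
 *   wil_pmc_read()/wil_pmc_llseek()/wil_pmcring_read() - expose the
 *                     captured data and the raw ring to user space
 *   wil_pmc_free() - release the memory, optionally sending WMI_PMC_RELEASE
 */
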
struct desc_alloc_info {
        dma_addr_t pa;
        void      *va;
};

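/* PMC is considered allocated as long as the p-ring memory is held */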
static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
        return !!pmc->pring_va;
}

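/* Reset the PMC context and set up the mutex that serializes the
 * alloc/free/read/llseek operations below.
 */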
void wil_pmc_init(struct wil6210_priv *wil)
{
        memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
        mutex_init(&wil->pmc.lock);
}

/* Allocate the physical ring (p-ring) and the required number of
 * descriptors of the required size.
 * Initialize the descriptors as required by PMC DMA.
 * Each dword of a descriptor's buffer is initialized to hold the
 * dword's serial number in the LSW and the reserved value
 * PCM_DATA_INVALID_DW_VAL in the MSW.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
                   int num_descriptors,
                   int descriptor_size)
{
        u32 i;
        struct pmc_ctx *pmc = &wil->pmc;
        struct device *dev = wil_to_dev(wil);
        struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
        struct wmi_pmc_cmd pmc_cmd = {0};
        int last_cmd_err = -ENOMEM;

        mutex_lock(&pmc->lock);

        if (wil_is_pmc_allocated(pmc)) {
                /* sanity check */
                wil_err(wil, "ERROR pmc is already allocated\n");
                goto no_release_err;
        }
        if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
                wil_err(wil,
                        "Invalid params num_descriptors(%d), descriptor_size(%d)\n",
                        num_descriptors, descriptor_size);
                last_cmd_err = -EINVAL;
                goto no_release_err;
        }

        if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
                wil_err(wil,
                        "num_descriptors(%d) exceeds max ring size %d\n",
                        num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
                last_cmd_err = -EINVAL;
                goto no_release_err;
        }

        if (num_descriptors > INT_MAX / descriptor_size) {
                wil_err(wil,
                        "Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
                        num_descriptors, descriptor_size);
                last_cmd_err = -EINVAL;
                goto no_release_err;
        }

        pmc->num_descriptors = num_descriptors;
        pmc->descriptor_size = descriptor_size;

        wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
                     num_descriptors, descriptor_size);

        /* allocate descriptors info list in pmc context */
        pmc->descriptors = kcalloc(num_descriptors,
                                   sizeof(struct desc_alloc_info),
                                   GFP_KERNEL);
        if (!pmc->descriptors) {
                wil_err(wil, "ERROR allocating pmc skb list\n");
                goto no_release_err;
        }

        wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
                     pmc->descriptors);

        /* Allocate pring buffer and descriptors.
         * vring->va should be aligned on its size rounded up to power of 2
         * This is guaranteed by dma_alloc_coherent.
         *
         * HW has a limitation that all vring addresses must share the same
         * upper 16 msb bits part of 48 bits address. To work around that,
         * if we are using more than 32 bit addresses switch to 32 bit
         * allocation before allocating vring memory.
         *
         * There's no check for the return value of dma_set_mask_and_coherent,
         * since we assume that if we were able to set the mask during
         * initialization in this system it will not fail if we set it again.
         */
        if (wil->dma_addr_size > 32)
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        pmc->pring_va = dma_alloc_coherent(dev,
                        sizeof(struct vring_tx_desc) * num_descriptors,
                        &pmc->pring_pa,
                        GFP_KERNEL);

        if (wil->dma_addr_size > 32)
                dma_set_mask_and_coherent(dev,
                                          DMA_BIT_MASK(wil->dma_addr_size));

        wil_dbg_misc(wil,
                     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
                     pmc->pring_va, &pmc->pring_pa,
                     sizeof(struct vring_tx_desc),
                     num_descriptors,
                     sizeof(struct vring_tx_desc) * num_descriptors);

        if (!pmc->pring_va) {
                wil_err(wil, "ERROR allocating pmc pring\n");
                goto release_pmc_skb_list;
        }

        /* Initially, all descriptors are SW owned.
         * For Tx, Rx and PMC descriptors the ownership bit is at the same
         * location, thus we can use any of them.
         */
        for (i = 0; i < num_descriptors; i++) {
                struct vring_tx_desc *_d = &pmc->pring_va[i];
                struct vring_tx_desc dd = {}, *d = &dd;
                int j = 0;

                pmc->descriptors[i].va = dma_alloc_coherent(dev,
                        descriptor_size,
                        &pmc->descriptors[i].pa,
                        GFP_KERNEL);

                if (unlikely(!pmc->descriptors[i].va)) {
                        wil_err(wil, "ERROR allocating pmc descriptor %d", i);
                        goto release_pmc_skbs;
                }

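                /* pre-fill the buffer: dword serial number in the LSW and
                 * the reserved PCM_DATA_INVALID_DW_VAL marker in the MSW,
                 * as described in the comment above wil_pmc_alloc()
                 */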
                for (j = 0; j < descriptor_size / sizeof(u32); j++) {
                        u32 *p = (u32 *)pmc->descriptors[i].va + j;
                        *p = PCM_DATA_INVALID_DW_VAL | j;
                }

                /* configure dma descriptor */
                d->dma.addr.addr_low =
                        cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
                d->dma.addr.addr_high =
                        cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
                d->dma.status = 0; /* 0 = HW_OWNED */
                d->dma.length = cpu_to_le16(descriptor_size);
                d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
                *_d = *d;
        }

        wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

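        /* Pass the p-ring base address and ring size to firmware via
         * WMI_PMC_ALLOCATE so it can start using the PMC buffers.
         */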
        pmc_cmd.op = WMI_PMC_ALLOCATE;
        pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
        pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

        wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
        pmc->last_cmd_status = wmi_send(wil,
                                        WMI_PMC_CMDID,
                                        vif->mid,
                                        &pmc_cmd,
                                        sizeof(pmc_cmd));
        if (pmc->last_cmd_status) {
                wil_err(wil,
                        "WMI_PMC_CMD with ALLOCATE op failed with status %d",
                        pmc->last_cmd_status);
                goto release_pmc_skbs;
        }

        mutex_unlock(&pmc->lock);

        return;

release_pmc_skbs:
        wil_err(wil, "exit on error: Releasing skbs...\n");
        for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
                dma_free_coherent(dev,
                                  descriptor_size,
                                  pmc->descriptors[i].va,
                                  pmc->descriptors[i].pa);

                pmc->descriptors[i].va = NULL;
        }
        wil_err(wil, "exit on error: Releasing pring...\n");

        dma_free_coherent(dev,
                          sizeof(struct vring_tx_desc) * num_descriptors,
                          pmc->pring_va,
                          pmc->pring_pa);

        pmc->pring_va = NULL;

release_pmc_skb_list:
        wil_err(wil, "exit on error: Releasing descriptors info list...\n");
        kfree(pmc->descriptors);
        pmc->descriptors = NULL;

no_release_err:
        pmc->last_cmd_status = last_cmd_err;
        mutex_unlock(&pmc->lock);
}

/* Traverse the p-ring and release all buffers.
 * At the end, release the p-ring memory.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
        struct pmc_ctx *pmc = &wil->pmc;
        struct device *dev = wil_to_dev(wil);
        struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
        struct wmi_pmc_cmd pmc_cmd = {0};

        mutex_lock(&pmc->lock);

        pmc->last_cmd_status = 0;

        if (!wil_is_pmc_allocated(pmc)) {
                wil_dbg_misc(wil,
                             "pmc_free: Error, can't free - not allocated\n");
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return;
        }

        if (send_pmc_cmd) {
                wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
                pmc_cmd.op = WMI_PMC_RELEASE;
                pmc->last_cmd_status =
                                wmi_send(wil, WMI_PMC_CMDID, vif->mid,
                                         &pmc_cmd, sizeof(pmc_cmd));
                if (pmc->last_cmd_status) {
                        wil_err(wil,
                                "WMI_PMC_CMD with RELEASE op failed, status %d",
                                pmc->last_cmd_status);
                        /* There's nothing we can do with this error.
                         * Normally, it should never occur.
                         * Continue with freeing all memory allocated for pmc.
                         */
                }
        }

        if (pmc->pring_va) {
                size_t buf_size = sizeof(struct vring_tx_desc) *
                                  pmc->num_descriptors;

                wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
                             pmc->pring_va);
                dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

                pmc->pring_va = NULL;
        } else {
                pmc->last_cmd_status = -ENOENT;
        }

        if (pmc->descriptors) {
                int i;

                for (i = 0;
                     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
                        dma_free_coherent(dev,
                                          pmc->descriptor_size,
                                          pmc->descriptors[i].va,
                                          pmc->descriptors[i].pa);
                        pmc->descriptors[i].va = NULL;
                }
                wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
                             pmc->num_descriptors);
                wil_dbg_misc(wil,
                             "pmc_free: free pmc descriptors info list %p\n",
                             pmc->descriptors);
                kfree(pmc->descriptors);
                pmc->descriptors = NULL;
        } else {
                pmc->last_cmd_status = -ENOENT;
        }

        mutex_unlock(&pmc->lock);
}

/* Status of the last operation requested via debugfs: alloc/free/read.
 * Returns 0 on success or a negative errno.
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
        wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
                     wil->pmc.last_cmd_status);

        return wil->pmc.last_cmd_status;
}

/* Read from the required position up to the end of the current descriptor,
 * depending on the descriptor size configured during the alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
                     loff_t *f_pos)
{
        struct wil6210_priv *wil = filp->private_data;
        struct pmc_ctx *pmc = &wil->pmc;
        size_t retval = 0;
        unsigned long long idx;
        loff_t offset;
        size_t pmc_size;

        mutex_lock(&pmc->lock);

        if (!wil_is_pmc_allocated(pmc)) {
                wil_err(wil, "error, pmc is not allocated!\n");
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return -EPERM;
        }

        pmc_size = pmc->descriptor_size * pmc->num_descriptors;

        wil_dbg_misc(wil,
                     "pmc_read: size %u, pos %lld\n",
                     (u32)count, *f_pos);

        pmc->last_cmd_status = 0;

        idx = *f_pos;
        do_div(idx, pmc->descriptor_size);
        offset = *f_pos - (idx * pmc->descriptor_size);

        if (*f_pos >= pmc_size) {
                wil_dbg_misc(wil,
                             "pmc_read: reached end of pmc buf: %lld >= %u\n",
                             *f_pos, (u32)pmc_size);
                pmc->last_cmd_status = -ERANGE;
                goto out;
        }

        wil_dbg_misc(wil,
                     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
                     *f_pos, idx, offset, count);

        /* if no errors, return the copied byte count */
        retval = simple_read_from_buffer(buf,
                                         count,
                                         &offset,
                                         pmc->descriptors[idx].va,
                                         pmc->descriptor_size);
        *f_pos += retval;
out:
        mutex_unlock(&pmc->lock);

        return retval;
}

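/* llseek over the flat PMC data buffer: supports SEEK_SET/SEEK_CUR/SEEK_END
 * and clamps the resulting position to descriptor_size * num_descriptors.
 */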
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
        loff_t newpos;
        struct wil6210_priv *wil = filp->private_data;
        struct pmc_ctx *pmc = &wil->pmc;
        size_t pmc_size;

        mutex_lock(&pmc->lock);

        if (!wil_is_pmc_allocated(pmc)) {
                wil_err(wil, "error, pmc is not allocated!\n");
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return -EPERM;
        }

        pmc_size = pmc->descriptor_size * pmc->num_descriptors;

        switch (whence) {
        case 0: /* SEEK_SET */
                newpos = off;
                break;

        case 1: /* SEEK_CUR */
                newpos = filp->f_pos + off;
                break;

        case 2: /* SEEK_END */
                newpos = pmc_size;
                break;

        default: /* can't happen */
                newpos = -EINVAL;
                goto out;
        }

        if (newpos < 0) {
                newpos = -EINVAL;
                goto out;
        }
        if (newpos > pmc_size)
                newpos = pmc_size;

        filp->f_pos = newpos;

out:
        mutex_unlock(&pmc->lock);

        return newpos;
}

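/* Dump the raw p-ring (descriptor ring) contents into a seq_file */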
int wil_pmcring_read(struct seq_file *s, void *data)
{
        struct wil6210_priv *wil = s->private;
        struct pmc_ctx *pmc = &wil->pmc;
        size_t pmc_ring_size =
                sizeof(struct vring_rx_desc) * pmc->num_descriptors;

        mutex_lock(&pmc->lock);

        if (!wil_is_pmc_allocated(pmc)) {
                wil_err(wil, "error, pmc is not allocated!\n");
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return -EPERM;
        }

        wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);

        seq_write(s, pmc->pring_va, pmc_ring_size);

        mutex_unlock(&pmc->lock);

        return 0;
}