// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <[email protected]>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle);

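/*
 * Program the TMC for circular buffer (ETB) operation: wait for the
 * controller to signal ready, select circular buffer mode, configure the
 * formatter/flush control and trigger counter registers, then enable
 * trace capture.
 */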
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = 0;

        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        rc = tmc_wait_for_tmcready(drvdata);
        if (rc) {
                dev_err(&drvdata->csdev->dev,
                        "Failed to enable: TMC not ready\n");
                CS_LOCK(drvdata->base);
                return rc;
        }

        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
        return rc;
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->csdev);

        if (rc)
                return rc;

        rc = __tmc_etb_enable_hw(drvdata);
        if (rc)
                coresight_disclaim_device(drvdata->csdev);
        return rc;
}

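/*
 * Drain the TMC RAM through the RAM Read Data register into the local
 * sysFS buffer, one 32-bit word at a time, until the controller signals
 * that no more data is available (reads return 0xFFFFFFFF).  If the
 * buffer wrapped around, place a barrier packet at the start of the
 * buffer so that decoders can resynchronise.
 */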
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
        char *bufp;
        u32 read_data, lost;

        /* Check if the buffer wrapped around. */
        lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
        bufp = drvdata->buf;
        drvdata->len = 0;
        while (1) {
                read_data = readl_relaxed(drvdata->base + TMC_RRD);
                if (read_data == 0xFFFFFFFF)
                        break;
                memcpy(bufp, &read_data, 4);
                bufp += 4;
                drvdata->len += 4;
        }

        if (lost)
                coresight_insert_barrier_packet(drvdata->buf);
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
                tmc_etb_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        __tmc_etb_disable_hw(drvdata);
        coresight_disclaim_device(drvdata->csdev);
}

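/*
 * Program the TMC for hardware FIFO (ETF link) operation: wait for the
 * controller to signal ready, select HW FIFO mode, enable formatting and
 * trigger insertion, clear the buffer watermark and start capture.
 */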
static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = 0;

        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        rc = tmc_wait_for_tmcready(drvdata);
        if (rc) {
                dev_err(&drvdata->csdev->dev,
                        "Failed to enable: TMC not ready\n");
                CS_LOCK(drvdata->base);
                return rc;
        }

        writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
        return rc;
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->csdev);

        if (rc)
                return rc;

        rc = __tmc_etf_enable_hw(drvdata);
        if (rc)
                coresight_disclaim_device(drvdata->csdev);
        return rc;
}

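/*
 * Stop ETF (link mode) capture: flush and stop the formatter, disable the
 * TMC and release the claim on the device.
 */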
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
        struct coresight_device *csdev = drvdata->csdev;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        tmc_disable_hw(drvdata);
        coresight_disclaim_device_unlocked(csdev);
        CS_LOCK(drvdata->base);
}

/*
 * Return the amount of trace data available in the buffer from @pos, up to
 * a maximum of @len bytes, updating @bufpp with where to find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
                                loff_t pos, size_t len, char **bufpp)
{
        ssize_t actual = len;

        /* Adjust the len to the available size at @pos */
        if (pos + actual > drvdata->len)
                actual = drvdata->len - pos;
        if (actual > 0)
                *bufpp = drvdata->buf + pos;
        return actual;
}

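/*
 * Enable the ETB/ETF as a sink from sysFS.  If no trace buffer is available
 * one is allocated outside of the spinlock; the hardware is then programmed
 * (unless the sink is already enabled from sysFS) and the device reference
 * count incremented.
 */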
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        bool used = false;
        char *buf = NULL;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->buf) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /* Allocate the memory here, outside of the spinlock */
                buf = kzalloc(drvdata->size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
                csdev->refcnt++;
                goto out;
        }

        /*
         * If drvdata::buf isn't NULL, memory was allocated for a previous
         * trace run but wasn't read.  If so simply zero-out the memory.
         * Otherwise use the memory allocated above.
         *
         * The memory is freed when users read the buffer using the
         * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etb() for
         * details.
         */
        if (drvdata->buf) {
                memset(drvdata->buf, 0, drvdata->size);
        } else {
                used = true;
                drvdata->buf = buf;
        }

        ret = tmc_etb_enable_hw(drvdata);
        if (!ret) {
                coresight_set_mode(csdev, CS_MODE_SYSFS);
                csdev->refcnt++;
        } else {
                /* Free up the buffer if we failed to enable */
                used = false;
        }
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used)
                kfree(buf);

        return ret;
}

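/*
 * Enable the ETB/ETF as a sink on behalf of the perf framework.  The sink
 * is refused if it is being read, is already driven from sysFS or is owned
 * by a different process; otherwise the perf ring buffer is hooked up via
 * tmc_set_etf_buffer() and the hardware enabled on first use.
 */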
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
        int ret = 0;
        pid_t pid;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        struct perf_output_handle *handle = data;
        struct cs_buffers *buf = etm_perf_sink_config(handle);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        do {
                ret = -EINVAL;
                if (drvdata->reading)
                        break;
                /*
                 * No need to continue if the ETB/ETF is already being
                 * operated from sysFS.
                 */
                if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
                        ret = -EBUSY;
                        break;
                }

                /* Get a handle on the pid of the process to monitor */
                pid = buf->pid;

                if (drvdata->pid != -1 && drvdata->pid != pid) {
                        ret = -EBUSY;
                        break;
                }

                ret = tmc_set_etf_buffer(csdev, handle);
                if (ret)
                        break;

                /*
                 * No HW configuration is needed if the sink is already in
                 * use for this session.
                 */
                if (drvdata->pid == pid) {
                        csdev->refcnt++;
                        break;
                }

                ret = tmc_etb_enable_hw(drvdata);
                if (!ret) {
                        /* Associate with monitored process. */
                        drvdata->pid = pid;
                        coresight_set_mode(csdev, CS_MODE_PERF);
                        csdev->refcnt++;
                }
        } while (0);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

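/* Sink enable entry point: dispatch on the requested mode. */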
static int tmc_enable_etf_sink(struct coresight_device *csdev,
                               enum cs_mode mode, void *data)
{
        int ret;

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = tmc_enable_etf_sink_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = tmc_enable_etf_sink_perf(csdev, data);
                break;
        /* We shouldn't be here */
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
        return 0;
}

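/*
 * Drop a reference on the sink and, when the last user goes away, disable
 * the hardware and dissociate it from the monitored process.
 */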
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        csdev->refcnt--;
        if (csdev->refcnt) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        /* Complain if we (somehow) got out of sync */
        WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
        tmc_etb_disable_hw(drvdata);
        /* Dissociate from monitored process. */
        drvdata->pid = -1;
        coresight_set_mode(csdev, CS_MODE_DISABLED);

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
        return 0;
}

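/*
 * Enable the ETF as a link in the trace path.  The hardware is programmed
 * only on the first enable; subsequent calls simply bump the reference
 * count.
 */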
static int tmc_enable_etf_link(struct coresight_device *csdev,
                               struct coresight_connection *in,
                               struct coresight_connection *out)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        bool first_enable = false;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        if (csdev->refcnt == 0) {
                ret = tmc_etf_enable_hw(drvdata);
                if (!ret) {
                        coresight_set_mode(csdev, CS_MODE_SYSFS);
                        first_enable = true;
                }
        }
        if (!ret)
                csdev->refcnt++;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        if (first_enable)
                dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
        return ret;
}

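/*
 * Drop a reference on the ETF link and disable the hardware when the last
 * user of the path goes away.
 */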
static void tmc_disable_etf_link(struct coresight_device *csdev,
                                 struct coresight_connection *in,
                                 struct coresight_connection *out)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        bool last_disable = false;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        csdev->refcnt--;
        if (csdev->refcnt == 0) {
                tmc_etf_disable_hw(drvdata);
                coresight_set_mode(csdev, CS_MODE_DISABLED);
                last_disable = true;
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        if (last_disable)
                dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

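/*
 * Allocate the cs_buffers structure used to exchange trace data with the
 * perf AUX ring buffer, preferably on the NUMA node of the CPU the event
 * runs on.
 */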
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
                                  struct perf_event *event, void **pages,
                                  int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

        /* Allocate memory structure for interaction with Perf */
        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->pid = task_pid_nr(event->owner);
        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void tmc_free_etf_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

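/*
 * Derive the current page and page offset in the perf AUX buffer from the
 * handle's head pointer so that tmc_update_etf_buffer() knows where to
 * start writing.
 */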
static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = etm_perf_sink_config(handle);

        if (!buf)
                return -EINVAL;

        /* wrap head around to the amount of space we have */
        head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

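/*
 * Copy whatever the TMC has captured into the perf AUX ring buffer.  The
 * formatter is flushed and stopped, the amount of available data is
 * derived from the read/write pointers, and the data is drained through
 * the RAM Read Data register.  Returns the number of bytes moved.
 */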
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
                                  struct perf_output_handle *handle,
                                  void *sink_config)
{
        bool lost = false;
        int i, cur;
        const u32 *barrier;
        u32 *buf_ptr;
        u64 read_ptr, write_ptr;
        u32 status;
        unsigned long offset, to_read = 0, flags;
        struct cs_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return 0;

        /* This shouldn't happen */
        if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
                return 0;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Don't do anything if another tracer is using this sink */
        if (csdev->refcnt != 1)
                goto out;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);

        read_ptr = tmc_read_rrp(drvdata);
        write_ptr = tmc_read_rwp(drvdata);

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                lost = true;
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /*
         * The TMC RAM buffer may be bigger than the space available in the
         * perf ring buffer (handle->size).  If so advance the RRP so that we
         * get the latest trace data.  In snapshot mode none of that matters
         * since we are expected to clobber stale data in favour of the latest
         * traces.
         */
        if (!buf->snapshot && to_read > handle->size) {
                u32 mask = tmc_get_memwidth_mask(drvdata);

                /*
                 * Make sure the new size is aligned in accordance with the
                 * requirement explained in function tmc_get_memwidth_mask().
                 */
                to_read = handle->size & mask;
                /* Move the RAM read pointer up */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within our limits */
                if (read_ptr > (drvdata->size - 1))
                        read_ptr -= drvdata->size;
                /* Tell the HW */
                tmc_write_rrp(drvdata, read_ptr);
                lost = true;
        }

        /*
         * Don't set the TRUNCATED flag in snapshot mode because 1) the
         * captured buffer is expected to be truncated and 2) a full buffer
         * prevents the event from being re-enabled by the perf core,
         * resulting in stale data being sent to user space.
         */
        if (!buf->snapshot && lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

        cur = buf->cur;
        offset = buf->offset;
        barrier = coresight_barrier_pkt;

        /* Read the trace data out of the TMC RAM, one word at a time */
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

                if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
                        *buf_ptr = *barrier;
                        barrier++;
                }

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /*
         * In snapshot mode we simply increment the head by the number of bytes
         * that were written.  User space will figure out how many bytes to get
         * from the AUX buffer based on the position of the head.
         */
        if (buf->snapshot)
                handle->head += to_read;

        /*
         * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
         * data before the aux_head is updated via perf_aux_output_end(), which
         * is expected by the perf ring buffer.
         */
        CS_LOCK(drvdata->base);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
        .enable         = tmc_enable_etf_sink,
        .disable        = tmc_disable_etf_sink,
        .alloc_buffer   = tmc_alloc_etf_buffer,
        .free_buffer    = tmc_free_etf_buffer,
        .update_buffer  = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
        .enable         = tmc_enable_etf_link,
        .disable        = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
        .link_ops       = &tmc_etf_link_ops,
};

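/*
 * Prepare the ETB/ETF for reading through the character device.  If the
 * TMC is running in sysFS mode it is stopped, which also dumps its internal
 * RAM to drvdata->buf, so that the snapshot is stable while user space
 * reads it.  Reading is refused while the sink is driven from perf.
 */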
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
        enum tmc_mode mode;
        int ret = 0;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* Don't interfere if operated from Perf */
        if (coresight_get_mode(drvdata->csdev) == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
                /* There is no point in reading a TMC in HW FIFO mode */
                mode = readl_relaxed(drvdata->base + TMC_MODE);
                if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                        ret = -EINVAL;
                        goto out;
                }
                __tmc_etb_disable_hw(drvdata);
        }

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

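/*
 * Undo tmc_read_prepare_etb().  If the sink is still enabled from sysFS the
 * buffer is zeroed and the hardware re-armed; otherwise the trace buffer is
 * released, as it has now been consumed.
 */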
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
        char *buf = NULL;
        enum tmc_mode mode;
        unsigned long flags;
        int rc = 0;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Re-enable the TMC if need be */
        if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
                /* There is no point in reading a TMC in HW FIFO mode */
                mode = readl_relaxed(drvdata->base + TMC_MODE);
                if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                        spin_unlock_irqrestore(&drvdata->spinlock, flags);
                        return -EINVAL;
                }
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer. As such zero-out the buffer so that we don't end
                 * up with stale data.
                 *
                 * Since the tracer is still enabled drvdata::buf
                 * can't be NULL.
                 */
                memset(drvdata->buf, 0, drvdata->size);
                rc = __tmc_etb_enable_hw(drvdata);
                if (rc) {
                        spin_unlock_irqrestore(&drvdata->spinlock, flags);
                        return rc;
                }
        } else {
                /*
                 * The ETB/ETF is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                buf = drvdata->buf;
                drvdata->buf = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /*
         * Free allocated memory outside of the spinlock.  There is no need
         * to assert the validity of 'buf' since calling kfree(NULL) is safe.
         */
        kfree(buf);

        return 0;
}