/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include "bpf_lru_list.h"

#define LOCAL_FREE_TARGET               (128)
#define LOCAL_NR_SCANS                  LOCAL_FREE_TARGET

#define PERCPU_FREE_TARGET              (16)
#define PERCPU_NR_SCANS                 PERCPU_FREE_TARGET

/* Helpers to get the local list index */
#define LOCAL_LIST_IDX(t)       ((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX     LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX  LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)   ((t) >= BPF_LOCAL_LIST_T_OFFSET)

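/* Return the CPU after @cpu in cpu_possible_mask, wrapping around to the
 * first possible CPU.  Used to walk the per-cpu local lists in round-robin
 * order when stealing free nodes from other CPUs.
 */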
static int get_next_cpu(int cpu)
{
        cpu = cpumask_next(cpu, cpu_possible_mask);
        if (cpu >= nr_cpu_ids)
                cpu = cpumask_first(cpu_possible_mask);
        return cpu;
}

/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
        return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}

static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
        return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}

/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
        return node->ref;
}

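/* Per-type node counters.  Only list types below NR_BPF_LRU_LIST_COUNT keep
 * a count; the counts are used by bpf_lru_list_inactive_low() to decide
 * whether the active list needs to be rotated.
 */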
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
                                   enum bpf_lru_list_type type)
{
        if (type < NR_BPF_LRU_LIST_COUNT)
                l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
                                   enum bpf_lru_list_type type)
{
        if (type < NR_BPF_LRU_LIST_COUNT)
                l->counts[type]--;
}

static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
                                        struct bpf_lru_node *node,
                                        struct list_head *free_list,
                                        enum bpf_lru_list_type tgt_free_type)
{
        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
                return;

        /* If the node being removed is the next_inactive_rotation candidate,
         * move the next_inactive_rotation pointer as well.
         */
        if (&node->list == l->next_inactive_rotation)
                l->next_inactive_rotation = l->next_inactive_rotation->prev;

        bpf_lru_list_count_dec(l, node->type);

        node->type = tgt_free_type;
        list_move(&node->list, free_list);
}

/* Move nodes from local list to the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
                                   struct bpf_lru_node *node,
                                   enum bpf_lru_list_type tgt_type)
{
        if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
            WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
                return;

        bpf_lru_list_count_inc(l, tgt_type);
        node->type = tgt_type;
        node->ref = 0;
        list_move(&node->list, &l->lists[tgt_type]);
}

/* Move a node within or between the active and inactive lists (e.g.
 * active to inactive, inactive to active, or from the tail of the
 * active list back to its head).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
                                struct bpf_lru_node *node,
                                enum bpf_lru_list_type tgt_type)
{
        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
            WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
                return;

        if (node->type != tgt_type) {
                bpf_lru_list_count_dec(l, node->type);
                bpf_lru_list_count_inc(l, tgt_type);
                node->type = tgt_type;
        }
        node->ref = 0;

        /* If the node being moved is the next_inactive_rotation candidate,
         * move the next_inactive_rotation pointer as well.
         */
        if (&node->list == l->next_inactive_rotation)
                l->next_inactive_rotation = l->next_inactive_rotation->prev;

        list_move(&node->list, &l->lists[tgt_type]);
}

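/* The inactive list is considered low when it holds fewer nodes than the
 * active list.  In that case the active list is rotated first so that more
 * nodes become candidates for shrinking.
 */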
static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
        return l->counts[BPF_LRU_LIST_T_INACTIVE] <
                l->counts[BPF_LRU_LIST_T_ACTIVE];
}

/* Rotate the active list:
 * 1. Start from the tail.
 * 2. If the node has the ref bit set, rotate it back to the head of
 *    the active list with the ref bit cleared, giving the node one
 *    more chance to survive in the active list.
 * 3. If the ref bit is not set, move the node to the head of the
 *    inactive list.
 * 4. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
                                         struct bpf_lru_list *l)
{
        struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
        struct bpf_lru_node *node, *tmp_node, *first_node;
        unsigned int i = 0;

        first_node = list_first_entry(active, struct bpf_lru_node, list);
        list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
                if (bpf_lru_node_is_ref(node))
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                else
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

                if (++i == lru->nr_scans || node == first_node)
                        break;
        }
}

/* Rotate the inactive list.  It starts from the next_inactive_rotation
 * candidate:
 * 1. If the node has the ref bit set, move it to the head of the
 *    active list with the ref bit cleared.
 * 2. If the node does not have the ref bit set, leave it at its
 *    current location (i.e. do nothing) so that it can be considered
 *    during the next inactive shrink.
 * 3. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
                                           struct bpf_lru_list *l)
{
        struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
        struct list_head *cur, *last, *next = inactive;
        struct bpf_lru_node *node;
        unsigned int i = 0;

        if (list_empty(inactive))
                return;

        last = l->next_inactive_rotation->next;
        if (last == inactive)
                last = last->next;

        cur = l->next_inactive_rotation;
        while (i < lru->nr_scans) {
                if (cur == inactive) {
                        cur = cur->prev;
                        continue;
                }

                node = list_entry(cur, struct bpf_lru_node, list);
                next = cur->prev;
                if (bpf_lru_node_is_ref(node))
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                if (cur == last)
                        break;
                cur = next;
                i++;
        }

        l->next_inactive_rotation = next;
}

/* Shrink the inactive list.  It starts from the tail of the
 * inactive list and only moves nodes without the ref bit set
 * to the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
                               struct bpf_lru_list *l,
                               unsigned int tgt_nshrink,
                               struct list_head *free_list,
                               enum bpf_lru_list_type tgt_free_type)
{
        struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
        struct bpf_lru_node *node, *tmp_node, *first_node;
        unsigned int nshrinked = 0;
        unsigned int i = 0;

        first_node = list_first_entry(inactive, struct bpf_lru_node, list);
        list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
                if (bpf_lru_node_is_ref(node)) {
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                } else if (lru->del_from_htab(lru->del_arg, node)) {
                        __bpf_lru_node_move_to_free(l, node, free_list,
                                                    tgt_free_type);
                        if (++nshrinked == tgt_nshrink)
                                break;
                }

                if (++i == lru->nr_scans)
                        break;
        }

        return nshrinked;
}

/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
        if (bpf_lru_list_inactive_low(l))
                __bpf_lru_list_rotate_active(lru, l);

        __bpf_lru_list_rotate_inactive(lru, l);
}

/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive(), it will just remove
 * one node from either the inactive or the active list
 * without honoring the ref bit.  It prefers the inactive
 * list over the active list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
                                          struct bpf_lru_list *l,
                                          unsigned int tgt_nshrink,
                                          struct list_head *free_list,
                                          enum bpf_lru_list_type tgt_free_type)
{
        struct bpf_lru_node *node, *tmp_node;
        struct list_head *force_shrink_list;
        unsigned int nshrinked;

        nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
                                                   free_list, tgt_free_type);
        if (nshrinked)
                return nshrinked;

        /* Do a force shrink by ignoring the reference bit */
        if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
                force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
        else
                force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

        list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
                                         list) {
                if (lru->del_from_htab(lru->del_arg, node)) {
                        __bpf_lru_node_move_to_free(l, node, free_list,
                                                    tgt_free_type);
                        return 1;
                }
        }

        return 0;
}

/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
                               struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_node *node, *tmp_node;

        list_for_each_entry_safe_reverse(node, tmp_node,
                                         local_pending_list(loc_l), list) {
                if (bpf_lru_node_is_ref(node))
                        __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
                else
                        __bpf_lru_node_move_in(l, node,
                                               BPF_LRU_LIST_T_INACTIVE);
        }
}

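/* Return a node to the global LRU's free list.  Takes the list lock
 * (IRQ-safe) and moves the node to BPF_LRU_LIST_T_FREE.
 */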
static void bpf_lru_list_push_free(struct bpf_lru_list *l,
                                   struct bpf_lru_node *node)
{
        unsigned long flags;

        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
                return;

        raw_spin_lock_irqsave(&l->lock, flags);
        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
        raw_spin_unlock_irqrestore(&l->lock, flags);
}

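/* Refill the local free list from the global LRU: flush the local pending
 * list back to the LRU, rotate the LRU, then grab up to LOCAL_FREE_TARGET
 * nodes from the LRU's free list.  If that is not enough, shrink the LRU
 * to make up the difference.  Called with loc_l->lock held.
 */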
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
                                           struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_list *l = &lru->common_lru.lru_list;
        struct bpf_lru_node *node, *tmp_node;
        unsigned int nfree = 0;

        raw_spin_lock(&l->lock);

        __local_list_flush(l, loc_l);

        __bpf_lru_list_rotate(lru, l);

        list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
                                 list) {
                __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
                                            BPF_LRU_LOCAL_LIST_T_FREE);
                if (++nfree == LOCAL_FREE_TARGET)
                        break;
        }

        if (nfree < LOCAL_FREE_TARGET)
                __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
                                      local_free_list(loc_l),
                                      BPF_LRU_LOCAL_LIST_T_FREE);

        raw_spin_unlock(&l->lock);
}

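/* Stamp the element's hash at hash_offset, mark the node as owned by @cpu
 * and queue it on the local pending list.  Called with loc_l->lock held.
 */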
static void __local_list_add_pending(struct bpf_lru *lru,
                                     struct bpf_lru_locallist *loc_l,
                                     int cpu,
                                     struct bpf_lru_node *node,
                                     u32 hash)
{
        *(u32 *)((void *)node + lru->hash_offset) = hash;
        node->cpu = cpu;
        node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
        node->ref = 0;
        list_add(&node->list, local_pending_list(loc_l));
}

static struct bpf_lru_node *__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_node *node;

        node = list_first_entry_or_null(local_free_list(loc_l),
                                        struct bpf_lru_node,
                                        list);
        if (node)
                list_del(&node->list);

        return node;
}

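/* Reclaim a node from the local pending list, oldest first.  The first pass
 * skips nodes with the ref bit set; if nothing is found, a second pass
 * ignores the ref bit.  A node is only taken if it can also be removed
 * from the hash table via del_from_htab().
 */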
static struct bpf_lru_node *__local_list_pop_pending(struct bpf_lru *lru,
                                                     struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_node *node;
        bool force = false;

ignore_ref:
        /* Get from the tail (i.e. older element) of the pending list. */
        list_for_each_entry_reverse(node, local_pending_list(loc_l),
                                    list) {
                if ((!bpf_lru_node_is_ref(node) || force) &&
                    lru->del_from_htab(lru->del_arg, node)) {
                        list_del(&node->list);
                        return node;
                }
        }

        if (!force) {
                force = true;
                goto ignore_ref;
        }

        return NULL;
}

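/* Per-cpu LRU allocation path: each CPU owns its own bpf_lru_list, so
 * everything happens under that CPU's list lock.  Rotate the lists, shrink
 * if the free list is empty, then hand out the first free node as INACTIVE
 * with the element's hash stamped in.
 */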
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
                                                    u32 hash)
{
        struct list_head *free_list;
        struct bpf_lru_node *node = NULL;
        struct bpf_lru_list *l;
        unsigned long flags;
        int cpu = raw_smp_processor_id();

        l = per_cpu_ptr(lru->percpu_lru, cpu);

        raw_spin_lock_irqsave(&l->lock, flags);

        __bpf_lru_list_rotate(lru, l);

        free_list = &l->lists[BPF_LRU_LIST_T_FREE];
        if (list_empty(free_list))
                __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
                                      BPF_LRU_LIST_T_FREE);

        if (!list_empty(free_list)) {
                node = list_first_entry(free_list, struct bpf_lru_node, list);
                *(u32 *)((void *)node + lru->hash_offset) = hash;
                node->ref = 0;
                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
        }

        raw_spin_unlock_irqrestore(&l->lock, flags);

        return node;
}

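/* Common (shared) LRU allocation path:
 * 1. Try this CPU's local free list.
 * 2. If empty, refill it from the global LRU and try again.
 * 3. If still empty, steal a free or pending node from other CPUs'
 *    local lists in round-robin order, starting at loc_l->next_steal.
 * The returned node is placed on this CPU's local pending list.
 */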
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
                                                    u32 hash)
{
        struct bpf_lru_locallist *loc_l, *steal_loc_l;
        struct bpf_common_lru *clru = &lru->common_lru;
        struct bpf_lru_node *node;
        int steal, first_steal;
        unsigned long flags;
        int cpu = raw_smp_processor_id();

        loc_l = per_cpu_ptr(clru->local_list, cpu);

        raw_spin_lock_irqsave(&loc_l->lock, flags);

        node = __local_list_pop_free(loc_l);
        if (!node) {
                bpf_lru_list_pop_free_to_local(lru, loc_l);
                node = __local_list_pop_free(loc_l);
        }

        if (node)
                __local_list_add_pending(lru, loc_l, cpu, node, hash);

        raw_spin_unlock_irqrestore(&loc_l->lock, flags);

        if (node)
                return node;

        /* No free node was found in the local free list or
         * the global LRU list.
         *
         * Steal from the local free/pending lists of the
         * current CPU and the remote CPUs in round-robin
         * order, starting with the loc_l->next_steal CPU.
         */

        first_steal = loc_l->next_steal;
        steal = first_steal;
        do {
                steal_loc_l = per_cpu_ptr(clru->local_list, steal);

                raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

                node = __local_list_pop_free(steal_loc_l);
                if (!node)
                        node = __local_list_pop_pending(lru, steal_loc_l);

                raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

                steal = get_next_cpu(steal);
        } while (!node && steal != first_steal);

        loc_l->next_steal = steal;

        if (node) {
                raw_spin_lock_irqsave(&loc_l->lock, flags);
                __local_list_add_pending(lru, loc_l, cpu, node, hash);
                raw_spin_unlock_irqrestore(&loc_l->lock, flags);
        }

        return node;
}

struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{
        if (lru->percpu)
                return bpf_percpu_lru_pop_free(lru, hash);
        else
                return bpf_common_lru_pop_free(lru, hash);
}

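/* Free a node in the common LRU.  A node still in PENDING state is moved
 * back to its owning CPU's local free list; node->type is re-checked under
 * loc_l->lock because another CPU may have taken the node off the pending
 * list in the meantime.  Otherwise the node goes back to the global LRU's
 * free list.
 */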
static void bpf_common_lru_push_free(struct bpf_lru *lru,
                                     struct bpf_lru_node *node)
{
        unsigned long flags;

        if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
            WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
                return;

        if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
                struct bpf_lru_locallist *loc_l;

                loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

                raw_spin_lock_irqsave(&loc_l->lock, flags);

                if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
                        raw_spin_unlock_irqrestore(&loc_l->lock, flags);
                        goto check_lru_list;
                }

                node->type = BPF_LRU_LOCAL_LIST_T_FREE;
                node->ref = 0;
                list_move(&node->list, local_free_list(loc_l));

                raw_spin_unlock_irqrestore(&loc_l->lock, flags);
                return;
        }

check_lru_list:
        bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}

static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
                                     struct bpf_lru_node *node)
{
        struct bpf_lru_list *l;
        unsigned long flags;

        l = per_cpu_ptr(lru->percpu_lru, node->cpu);

        raw_spin_lock_irqsave(&l->lock, flags);

        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

        raw_spin_unlock_irqrestore(&l->lock, flags);
}

void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{
        if (lru->percpu)
                bpf_percpu_lru_push_free(lru, node);
        else
                bpf_common_lru_push_free(lru, node);
}

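/* Pre-populate the common LRU's free list with all nr_elems elements of the
 * caller-provided buffer.  Each element is elem_size bytes and embeds a
 * struct bpf_lru_node at node_offset.
 */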
static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
                                    u32 node_offset, u32 elem_size,
                                    u32 nr_elems)
{
        struct bpf_lru_list *l = &lru->common_lru.lru_list;
        u32 i;

        for (i = 0; i < nr_elems; i++) {
                struct bpf_lru_node *node;

                node = (struct bpf_lru_node *)(buf + node_offset);
                node->type = BPF_LRU_LIST_T_FREE;
                node->ref = 0;
                list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                buf += elem_size;
        }
}

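/* Distribute the elements across the possible CPUs' LRU lists: each CPU
 * gets nr_elems / num_possible_cpus() elements on its own free list; any
 * remainder elements are not added to any list.
 */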
static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
                                    u32 node_offset, u32 elem_size,
                                    u32 nr_elems)
{
        u32 i, pcpu_entries;
        int cpu;
        struct bpf_lru_list *l;

        pcpu_entries = nr_elems / num_possible_cpus();

        i = 0;

        for_each_possible_cpu(cpu) {
                struct bpf_lru_node *node;

                l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
                node = (struct bpf_lru_node *)(buf + node_offset);
                node->cpu = cpu;
                node->type = BPF_LRU_LIST_T_FREE;
                node->ref = 0;
                list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                i++;
                buf += elem_size;
                if (i == nr_elems)
                        break;
                if (i % pcpu_entries)
                        goto again;
        }
}

void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
                      u32 elem_size, u32 nr_elems)
{
        if (lru->percpu)
                bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
                                        nr_elems);
        else
                bpf_common_lru_populate(lru, buf, node_offset, elem_size,
                                        nr_elems);
}

static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{
        int i;

        for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
                INIT_LIST_HEAD(&loc_l->lists[i]);

        loc_l->next_steal = cpu;

        raw_spin_lock_init(&loc_l->lock);
}

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
        int i;

        for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
                INIT_LIST_HEAD(&l->lists[i]);

        for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
                l->counts[i] = 0;

        l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

        raw_spin_lock_init(&l->lock);
}

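/* Initialize a bpf_lru instance.  @percpu selects between one LRU list per
 * CPU and a single shared LRU with per-cpu local free/pending lists.
 * @hash_offset is where the element's hash is stored relative to the
 * embedded bpf_lru_node, and @del_from_htab/@del_arg let the LRU remove an
 * element from its owning map before reusing the node.
 *
 * A caller (e.g. the LRU hash map) is expected to use the API roughly as:
 *
 *     bpf_lru_init(&lru, percpu, hash_offset, del_cb, map);
 *     bpf_lru_populate(&lru, buf, node_offset, elem_size, nr_elems);
 *     ...
 *     node = bpf_lru_pop_free(&lru, hash);   // allocate a node
 *     bpf_lru_push_free(&lru, node);         // free a node
 *     ...
 *     bpf_lru_destroy(&lru);
 */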
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
                 del_from_htab_func del_from_htab, void *del_arg)
{
        int cpu;

        if (percpu) {
                lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
                if (!lru->percpu_lru)
                        return -ENOMEM;

                for_each_possible_cpu(cpu) {
                        struct bpf_lru_list *l;

                        l = per_cpu_ptr(lru->percpu_lru, cpu);
                        bpf_lru_list_init(l);
                }
                lru->nr_scans = PERCPU_NR_SCANS;
        } else {
                struct bpf_common_lru *clru = &lru->common_lru;

                clru->local_list = alloc_percpu(struct bpf_lru_locallist);
                if (!clru->local_list)
                        return -ENOMEM;

                for_each_possible_cpu(cpu) {
                        struct bpf_lru_locallist *loc_l;

                        loc_l = per_cpu_ptr(clru->local_list, cpu);
                        bpf_lru_locallist_init(loc_l, cpu);
                }

                bpf_lru_list_init(&clru->lru_list);
                lru->nr_scans = LOCAL_NR_SCANS;
        }

        lru->percpu = percpu;
        lru->del_from_htab = del_from_htab;
        lru->del_arg = del_arg;
        lru->hash_offset = hash_offset;

        return 0;
}

void bpf_lru_destroy(struct bpf_lru *lru)
{
        if (lru->percpu)
                free_percpu(lru->percpu_lru);
        else
                free_percpu(lru->common_lru.local_list);
}