/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an array
 * of these. The embedded codetag ties each instance into the codetag framework.
 */
struct alloc_tag {
	struct codetag			ct;
	struct alloc_tag_counters __percpu	*counters;
} __aligned(8);
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

/* Marks a ref whose allocation is intentionally left unaccounted. */
#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
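/*
 * A short sketch of how the markers above are meant to be used: when an
 * allocation is deliberately left unaccounted (e.g. internal metadata), its
 * reference is marked empty so the later debug checks do not warn about a
 * missing tag. The helper name here is hypothetical, for illustration only:
 *
 *	static void mark_ref_untracked(union codetag_ref *ref)
 *	{
 *		set_codetag_empty(ref);		// recognized via is_codetag_empty()
 *	}
 */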
#ifdef CONFIG_MEM_ALLOC_PROFILING

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}
#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see the comments for
 * DECLARE_PER_CPU_SECTION). Instead, account all module allocations to a
 * single shared counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section("alloc_tags") = {					\
		.ct = CODE_TAG_INIT,					\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section("alloc_tags") = {					\
		.ct = CODE_TAG_INIT,					\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			 mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
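/*
 * Allocation fast paths are expected to test this static key before doing any
 * accounting, so the disabled case costs only a patched-out branch. A minimal
 * sketch (the wrapper name is hypothetical):
 *
 *	static inline void account_alloc(union codetag_ref *ref, size_t bytes)
 *	{
 *		if (mem_alloc_profiling_enabled())
 *			alloc_tag_add(ref, current->alloc_tag, bytes);
 *	}
 */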
static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
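/*
 * Example of consuming the summed counters, roughly as a reporting interface
 * might (a sketch; the printing context is illustrative):
 *
 *	struct alloc_tag_counters ctr = alloc_tag_read(tag);
 *
 *	pr_info("%s:%u allocated %llu bytes in %llu calls\n",
 *		tag->ct.filename, tag->ct.lineno, ctr.bytes, ctr.calls);
 *
 * The per-CPU counters are summed without synchronization, so the result is a
 * best-effort snapshot rather than an exact figure.
 */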
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif
/* Caller must verify that both ref and tag are valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	ref->ct = &tag->ct;
	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter, because the counter is decremented when each part is freed.
	 */
	this_cpu_inc(tag->counters->calls);
}
static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
	this_cpu_add(tag->counters->bytes, bytes);
}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}

#define alloc_tag_record(p)	((p) = current->alloc_tag)
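/*
 * A sketch of the expected lifecycle for one tracked object (the variable
 * names are illustrative, not an existing API):
 *
 *	union codetag_ref ref = {};	// stored alongside the object
 *
 *	// at allocation time, charge the callsite recorded in current->alloc_tag
 *	alloc_tag_add(&ref, current->alloc_tag, size);
 *
 *	// at free time, the same ref and size undo the accounting
 *	alloc_tag_sub(&ref, size);
 *
 * The ref must live as long as the object and the sizes must match, otherwise
 * the per-callsite byte counter drifts.
 */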
#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */
#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
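/*
 * Typical use of alloc_hooks(): an allocation API keeps its implementation in
 * a _noprof() variant and its public name becomes a macro wrapping that call,
 * so every callsite gets its own tag. A sketch with a hypothetical allocator:
 *
 *	void *my_alloc_noprof(size_t size, gfp_t gfp);
 *	#define my_alloc(...)	alloc_hooks(my_alloc_noprof(__VA_ARGS__))
 *
 * When CONFIG_MEM_ALLOC_PROFILING is disabled, DEFINE_ALLOC_TAG() expands to
 * nothing and the save/restore hooks compile away, leaving only the plain call.
 */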
#endif /* _LINUX_ALLOC_TAG_H */