#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
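
/*
 * Illustrative usage sketch (not part of the original file): a driver keeping
 * its own copy of a caller-supplied name.  The struct and field names below
 * are hypothetical.
 *
 *	static int example_set_name(struct example_dev *dev, const char *name)
 *	{
 *		char *copy = kstrdup(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree(dev->name);
 *		dev->name = copy;
 *		return 0;
 *	}
 */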

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
58 | ||
1a2f67b4 AD |
59 | /** |
60 | * kmemdup - duplicate region of memory | |
61 | * | |
62 | * @src: memory region to duplicate | |
63 | * @len: memory region length | |
64 | * @gfp: GFP mask to use | |
65 | */ | |
66 | void *kmemdup(const void *src, size_t len, gfp_t gfp) | |
67 | { | |
68 | void *p; | |
69 | ||
1d2c8eea | 70 | p = kmalloc_track_caller(len, gfp); |
1a2f67b4 AD |
71 | if (p) |
72 | memcpy(p, src, len); | |
73 | return p; | |
74 | } | |
75 | EXPORT_SYMBOL(kmemdup); | |
76 | ||
610a77e0 LZ |
77 | /** |
78 | * memdup_user - duplicate memory region from user space | |
79 | * | |
80 | * @src: source address in user space | |
81 | * @len: number of bytes to copy | |
82 | * | |
83 | * Returns an ERR_PTR() on failure. | |
84 | */ | |
85 | void *memdup_user(const void __user *src, size_t len) | |
86 | { | |
87 | void *p; | |
88 | ||
89 | /* | |
90 | * Always use GFP_KERNEL, since copy_from_user() can sleep and | |
91 | * cause pagefault, which makes it pointless to use GFP_NOFS | |
92 | * or GFP_ATOMIC. | |
93 | */ | |
94 | p = kmalloc_track_caller(len, GFP_KERNEL); | |
95 | if (!p) | |
96 | return ERR_PTR(-ENOMEM); | |
97 | ||
98 | if (copy_from_user(p, src, len)) { | |
99 | kfree(p); | |
100 | return ERR_PTR(-EFAULT); | |
101 | } | |
102 | ||
103 | return p; | |
104 | } | |
105 | EXPORT_SYMBOL(memdup_user); | |
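
/*
 * Illustrative usage sketch (not part of the original file): copying an
 * ioctl-style argument buffer in from user space.  The surrounding variable
 * names are hypothetical.  Note that failure is reported via ERR_PTR(), so
 * callers must check with IS_ERR()/PTR_ERR() rather than testing for NULL.
 *
 *	void *buf = memdup_user(user_ptr, len);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	... use buf ...
 *	kfree(buf);
 */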
106 | ||
ef2ad80c | 107 | /** |
93bc4e89 | 108 | * __krealloc - like krealloc() but don't free @p. |
ef2ad80c CL |
109 | * @p: object to reallocate memory for. |
110 | * @new_size: how many bytes of memory are required. | |
111 | * @flags: the type of memory to allocate. | |
112 | * | |
93bc4e89 PE |
113 | * This function is like krealloc() except it never frees the originally |
114 | * allocated buffer. Use this if you don't want to free the buffer immediately | |
115 | * like, for example, with RCU. | |
ef2ad80c | 116 | */ |
93bc4e89 | 117 | void *__krealloc(const void *p, size_t new_size, gfp_t flags) |
ef2ad80c CL |
118 | { |
119 | void *ret; | |
ef8b4520 | 120 | size_t ks = 0; |
ef2ad80c | 121 | |
93bc4e89 | 122 | if (unlikely(!new_size)) |
6cb8f913 | 123 | return ZERO_SIZE_PTR; |
ef2ad80c | 124 | |
ef8b4520 CL |
125 | if (p) |
126 | ks = ksize(p); | |
127 | ||
ef2ad80c CL |
128 | if (ks >= new_size) |
129 | return (void *)p; | |
130 | ||
131 | ret = kmalloc_track_caller(new_size, flags); | |
93bc4e89 | 132 | if (ret && p) |
be21f0ab | 133 | memcpy(ret, p, ks); |
93bc4e89 PE |
134 | |
135 | return ret; | |
136 | } | |
137 | EXPORT_SYMBOL(__krealloc); | |
138 | ||
139 | /** | |
140 | * krealloc - reallocate memory. The contents will remain unchanged. | |
141 | * @p: object to reallocate memory for. | |
142 | * @new_size: how many bytes of memory are required. | |
143 | * @flags: the type of memory to allocate. | |
144 | * | |
145 | * The contents of the object pointed to are preserved up to the | |
146 | * lesser of the new and old sizes. If @p is %NULL, krealloc() | |
147 | * behaves exactly like kmalloc(). If @size is 0 and @p is not a | |
148 | * %NULL pointer, the object pointed to is freed. | |
149 | */ | |
150 | void *krealloc(const void *p, size_t new_size, gfp_t flags) | |
151 | { | |
152 | void *ret; | |
153 | ||
154 | if (unlikely(!new_size)) { | |
ef2ad80c | 155 | kfree(p); |
93bc4e89 | 156 | return ZERO_SIZE_PTR; |
ef2ad80c | 157 | } |
93bc4e89 PE |
158 | |
159 | ret = __krealloc(p, new_size, flags); | |
160 | if (ret && p != ret) | |
161 | kfree(p); | |
162 | ||
ef2ad80c CL |
163 | return ret; |
164 | } | |
165 | EXPORT_SYMBOL(krealloc); | |
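
/*
 * Illustrative usage sketch (not part of the original file): growing a
 * dynamically sized array.  "entries", "new_count" and "tmp" are
 * hypothetical.  A temporary is used because assigning the result directly
 * would leak the old buffer on failure: krealloc() returns NULL but leaves
 * @p allocated and untouched in that case.
 *
 *	tmp = krealloc(entries, new_count * sizeof(*entries), GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	entries = tmp;
 */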
166 | ||
3ef0e5ba JW |
167 | /** |
168 | * kzfree - like kfree but zero memory | |
169 | * @p: object to free memory of | |
170 | * | |
171 | * The memory of the object @p points to is zeroed before freed. | |
172 | * If @p is %NULL, kzfree() does nothing. | |
a234bdc9 PE |
173 | * |
174 | * Note: this function zeroes the whole allocated buffer which can be a good | |
175 | * deal bigger than the requested buffer size passed to kmalloc(). So be | |
176 | * careful when using this function in performance sensitive code. | |
3ef0e5ba JW |
177 | */ |
178 | void kzfree(const void *p) | |
179 | { | |
180 | size_t ks; | |
181 | void *mem = (void *)p; | |
182 | ||
183 | if (unlikely(ZERO_OR_NULL_PTR(mem))) | |
184 | return; | |
185 | ks = ksize(mem); | |
186 | memset(mem, 0, ks); | |
187 | kfree(mem); | |
188 | } | |
189 | EXPORT_SYMBOL(kzfree); | |
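
/*
 * Illustrative usage sketch (not part of the original file): disposing of
 * sensitive material such as a temporary key buffer.  "key", "keylen" and
 * "tmp" are hypothetical.  Because the whole ksize() extent is cleared, this
 * is best reserved for buffers that actually hold secrets rather than used
 * as a general replacement for kfree().
 *
 *	u8 *tmp = kmemdup(key, keylen, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	... transform the key material in tmp ...
 *	kzfree(tmp);
 */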
190 | ||
96840aa0 DA |
191 | /* |
192 | * strndup_user - duplicate an existing string from user space | |
96840aa0 DA |
193 | * @s: The string to duplicate |
194 | * @n: Maximum number of bytes to copy, including the trailing NUL. | |
195 | */ | |
196 | char *strndup_user(const char __user *s, long n) | |
197 | { | |
198 | char *p; | |
199 | long length; | |
200 | ||
201 | length = strnlen_user(s, n); | |
202 | ||
203 | if (!length) | |
204 | return ERR_PTR(-EFAULT); | |
205 | ||
206 | if (length > n) | |
207 | return ERR_PTR(-EINVAL); | |
208 | ||
90d74045 | 209 | p = memdup_user(s, length); |
96840aa0 | 210 | |
90d74045 JL |
211 | if (IS_ERR(p)) |
212 | return p; | |
96840aa0 DA |
213 | |
214 | p[length - 1] = '\0'; | |
215 | ||
216 | return p; | |
217 | } | |
218 | EXPORT_SYMBOL(strndup_user); | |
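
/*
 * Illustrative usage sketch (not part of the original file): duplicating a
 * NUL-terminated string handed in from user space, with a hypothetical upper
 * bound.  As with memdup_user(), errors come back as ERR_PTR() values, never
 * as NULL.
 *
 *	char *name = strndup_user(uname, PAGE_SIZE);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	... use name ...
 *	kfree(name);
 */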

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
241 | ||
efc1a3b1 | 242 | #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) |
16d69265 AM |
243 | void arch_pick_mmap_layout(struct mm_struct *mm) |
244 | { | |
245 | mm->mmap_base = TASK_UNMAPPED_BASE; | |
246 | mm->get_unmapped_area = arch_get_unmapped_area; | |
247 | mm->unmap_area = arch_unmap_area; | |
248 | } | |
249 | #endif | |

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
263 | ||
9de100d0 AG |
264 | /** |
265 | * get_user_pages_fast() - pin user pages in memory | |
266 | * @start: starting user address | |
267 | * @nr_pages: number of pages from start to pin | |
268 | * @write: whether pages will be written to | |
269 | * @pages: array that receives pointers to the pages pinned. | |
270 | * Should be at least nr_pages long. | |
271 | * | |
9de100d0 AG |
272 | * Returns number of pages pinned. This may be fewer than the number |
273 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | |
274 | * were pinned, returns -errno. | |
d2bf6be8 NP |
275 | * |
276 | * get_user_pages_fast provides equivalent functionality to get_user_pages, | |
277 | * operating on current and current->mm, with force=0 and vma=NULL. However | |
278 | * unlike get_user_pages, it must be called without mmap_sem held. | |
279 | * | |
280 | * get_user_pages_fast may take mmap_sem and page table locks, so no | |
281 | * assumptions can be made about lack of locking. get_user_pages_fast is to be | |
282 | * implemented in a way that is advantageous (vs get_user_pages()) when the | |
283 | * user memory area is already faulted in and present in ptes. However if the | |
284 | * pages have to be faulted in, it may turn out to be slightly slower so | |
285 | * callers need to carefully consider what to use. On many architectures, | |
286 | * get_user_pages_fast simply falls back to get_user_pages. | |
9de100d0 | 287 | */ |
912985dc RR |
288 | int __attribute__((weak)) get_user_pages_fast(unsigned long start, |
289 | int nr_pages, int write, struct page **pages) | |
290 | { | |
291 | struct mm_struct *mm = current->mm; | |
292 | int ret; | |
293 | ||
294 | down_read(&mm->mmap_sem); | |
295 | ret = get_user_pages(current, mm, start, nr_pages, | |
296 | write, 0, pages, NULL); | |
297 | up_read(&mm->mmap_sem); | |
298 | ||
299 | return ret; | |
300 | } | |
301 | EXPORT_SYMBOL_GPL(get_user_pages_fast); | |
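
/*
 * Illustrative usage sketch (not part of the original file): pinning a single
 * user page for short-lived kernel access.  "uaddr" and "page" are
 * hypothetical; the caller is responsible for releasing the page with
 * put_page() once it is done with it.
 *
 *	struct page *page;
 *	int ret;
 *
 *	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, &page);
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	... access the page contents, e.g. via kmap_atomic() ...
 *	put_page(page);
 */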

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);