/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>

#include "zsmalloc.h"
#include "zsmalloc_int.h"

/*
 * A zspage's class index and fullness group
 * are encoded in its (first) page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
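
/*
 * Illustrative example (not part of the original source): with the
 * layout above, set_zspage_mapping(page, 5, fullness) stores
 * (5 << FULLNESS_BITS) | fullness in page->mapping; if the fullness
 * value happened to be 2, the stored word would be (5 << 4) | 2 = 0x52.
 * get_zspage_mapping() recovers both fields by masking and shifting.
 */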

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (void *) handle value.
 *
 * Note that object index <obj_idx> is relative to the system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 */
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
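
/*
 * Illustrative sketch (the numbers are assumptions, not taken from the
 * original headers): on a 64-bit build with MAX_PHYSMEM_BITS == 46 and
 * PAGE_SHIFT == 12, _PFN_BITS is 34 and OBJ_INDEX_BITS is 30. A handle
 * for obj_idx 3 on the page with PFN 0x1000 would then be
 * (0x1000 << 30) | 3, and obj_handle_to_location() below simply undoes
 * the shift and mask.
 */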

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
        return test_bit(PG_private, &page->flags);
}

static int is_last_page(struct page *page)
{
        return test_bit(PG_private_2, &page->flags);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
                                enum fullness_group *fullness)
{
        unsigned long m;
        BUG_ON(!is_first_page(page));

        m = (unsigned long)page->mapping;
        *fullness = m & FULLNESS_MASK;
        *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
                                enum fullness_group fullness)
{
        unsigned long m;
        BUG_ON(!is_first_page(page));

        m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
                        (fullness & FULLNESS_MASK);
        page->mapping = (struct address_space *)m;
}

static int get_size_class_index(int size)
{
        int idx = 0;

        if (likely(size > ZS_MIN_ALLOC_SIZE))
                idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
                                ZS_SIZE_CLASS_DELTA);

        return idx;
}
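
/*
 * Example (illustrative only; the real constants live in
 * zsmalloc_int.h): if ZS_MIN_ALLOC_SIZE were 32 and
 * ZS_SIZE_CLASS_DELTA were 16, a request for 100 bytes would map to
 * idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the class that rounds the
 * request up to 32 + 5 * 16 = 112 bytes.
 */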

static enum fullness_group get_fullness_group(struct page *page)
{
        int inuse, max_objects;
        enum fullness_group fg;
        BUG_ON(!is_first_page(page));

        inuse = page->inuse;
        max_objects = page->objects;

        if (inuse == 0)
                fg = ZS_EMPTY;
        else if (inuse == max_objects)
                fg = ZS_FULL;
        else if (inuse <= max_objects / fullness_threshold_frac)
                fg = ZS_ALMOST_EMPTY;
        else
                fg = ZS_ALMOST_FULL;

        return fg;
}
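
/*
 * Illustrative example (fullness_threshold_frac is defined in
 * zsmalloc_int.h; the value 4 used here is an assumption): with
 * max_objects == 10 and a threshold fraction of 4, a zspage with 2
 * objects in use is ZS_ALMOST_EMPTY (2 <= 10 / 4), while 3 objects in
 * use already counts as ZS_ALMOST_FULL.
 */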

static void insert_zspage(struct page *page, struct size_class *class,
                                enum fullness_group fullness)
{
        struct page **head;

        BUG_ON(!is_first_page(page));

        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;

        head = &class->fullness_list[fullness];
        if (*head)
                list_add_tail(&page->lru, &(*head)->lru);

        *head = page;
}

static void remove_zspage(struct page *page, struct size_class *class,
                                enum fullness_group fullness)
{
        struct page **head;

        BUG_ON(!is_first_page(page));

        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;

        head = &class->fullness_list[fullness];
        BUG_ON(!*head);
        if (list_empty(&(*head)->lru))
                *head = NULL;
        else if (*head == page)
                *head = (struct page *)list_entry((*head)->lru.next,
                                        struct page, lru);

        list_del_init(&page->lru);
}

static enum fullness_group fix_fullness_group(struct zs_pool *pool,
                                                struct page *page)
{
        unsigned int class_idx;
        struct size_class *class;
        enum fullness_group currfg, newfg;

        BUG_ON(!is_first_page(page));

        get_zspage_mapping(page, &class_idx, &currfg);
        newfg = get_fullness_group(page);
        if (newfg == currfg)
                goto out;

        class = &pool->size_class[class_idx];
        remove_zspage(page, class, currfg);
        insert_zspage(page, class, newfg);
        set_zspage_mapping(page, class_idx, newfg);

out:
        return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at the end of
 * each zspage, which is given as:
 *	wastage = Zp % class_size
 * where Zp = zspage size = k * PAGE_SIZE, for k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_zspage_order(int class_size)
{
        int i, max_usedpc = 0;
        /* zspage order which gives maximum used size per KB */
        int max_usedpc_order = 1;

        for (i = 1; i <= max_zspage_order; i++) {
                int zspage_size;
                int waste, usedpc;

                zspage_size = i * PAGE_SIZE;
                waste = zspage_size % class_size;
                usedpc = (zspage_size - waste) * 100 / zspage_size;

                if (usedpc > max_usedpc) {
                        max_usedpc = usedpc;
                        max_usedpc_order = i;
                }
        }

        return max_usedpc_order;
}
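
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * max_zspage_order >= 3): for class_size == 1536 (3/8 of a page),
 * order 1 wastes 4096 % 1536 = 1024 bytes (75% used), order 2 wastes
 * 512 bytes (93% used), and order 3 wastes nothing (100% used), so
 * get_zspage_order() returns 3, matching the comment above.
 */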

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
        if (is_first_page(page))
                return page;
        else
                return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
        struct page *next;

        if (is_last_page(page))
                next = NULL;
        else if (is_first_page(page))
                next = (struct page *)page->private;
        else
                next = list_entry(page->lru.next, struct page, lru);

        return next;
}

/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
        unsigned long handle;

        if (!page) {
                BUG_ON(obj_idx);
                return NULL;
        }

        handle = page_to_pfn(page) << OBJ_INDEX_BITS;
        handle |= (obj_idx & OBJ_INDEX_MASK);

        return (void *)handle;
}

/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(void *handle, struct page **page,
                                unsigned long *obj_idx)
{
        unsigned long hval = (unsigned long)handle;

        *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
        *obj_idx = hval & OBJ_INDEX_MASK;
}

static unsigned long obj_idx_to_offset(struct page *page,
                                unsigned long obj_idx, int class_size)
{
        unsigned long off = 0;

        if (!is_first_page(page))
                off = page->index;

        return off + obj_idx * class_size;
}
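
/*
 * Illustrative example (assuming PAGE_SIZE == 4096): with a class size
 * of 1536, the first page of a zspage holds objects starting at
 * offsets 0, 1536 and 3072; the third object spills into the next
 * page, whose page->index is therefore 512 (the offset of the first
 * object that starts in it). obj_idx_to_offset(second_page, 1, 1536)
 * then yields 512 + 1536 = 2048.
 */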

static void free_zspage(struct page *first_page)
{
        struct page *nextp, *tmp, *head_extra;

        BUG_ON(!is_first_page(first_page));
        BUG_ON(first_page->inuse);

        head_extra = (struct page *)page_private(first_page);

        clear_bit(PG_private, &first_page->flags);
        clear_bit(PG_private_2, &first_page->flags);
        set_page_private(first_page, 0);
        first_page->mapping = NULL;
        first_page->freelist = NULL;
        reset_page_mapcount(first_page);
        __free_page(first_page);

        /* zspage with only 1 system page */
        if (!head_extra)
                return;

        /* free the remaining sub-pages, then the first sub-page itself */
        list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
                list_del(&nextp->lru);
                clear_bit(PG_private_2, &nextp->flags);
                nextp->index = 0;
                __free_page(nextp);
        }
        clear_bit(PG_private_2, &head_extra->flags);
        head_extra->index = 0;
        __free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
        unsigned long off = 0;
        struct page *page = first_page;

        BUG_ON(!is_first_page(first_page));
        while (page) {
                struct page *next_page;
                struct link_free *link;
                unsigned int i, objs_on_page;

                /*
                 * page->index stores offset of first object starting
                 * in the page. For the first page, this is always 0,
                 * so we use first_page->index (aka ->freelist) to store
                 * head of corresponding zspage's freelist.
                 */
                if (page != first_page)
                        page->index = off;

                link = (struct link_free *)kmap_atomic(page) +
                                                off / sizeof(*link);
                objs_on_page = (PAGE_SIZE - off) / class->size;

                for (i = 1; i <= objs_on_page; i++) {
                        off += class->size;
                        if (off < PAGE_SIZE) {
                                link->next = obj_location_to_handle(page, i);
                                link += class->size / sizeof(*link);
                        }
                }

                /*
                 * We now come to the last (full or partial) object on this
                 * page, which must point to the first object on the next
                 * page (if present)
                 */
                next_page = get_next_page(page);
                link->next = obj_location_to_handle(next_page, 0);
                kunmap_atomic(link);
                page = next_page;
                off = (off + class->size) % PAGE_SIZE;
        }
}

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
        int i, error;
        struct page *first_page = NULL;

        /*
         * Allocate individual pages and link them together as:
         * 1. first page->private = first sub-page
         * 2. all sub-pages are linked together using page->lru
         * 3. each sub-page is linked to the first page using page->first_page
         *
         * For each size class, First/Head pages are linked together using
         * page->lru. Also, we set PG_private to identify the first page
         * (i.e. no other sub-page has this flag set) and PG_private_2 to
         * identify the last page.
         */
        error = -ENOMEM;
        for (i = 0; i < class->zspage_order; i++) {
                struct page *page, *prev_page;

                page = alloc_page(flags);
                if (!page)
                        goto cleanup;

                INIT_LIST_HEAD(&page->lru);
                if (i == 0) {	/* first page */
                        set_bit(PG_private, &page->flags);
                        set_page_private(page, 0);
                        first_page = page;
                        first_page->inuse = 0;
                }
                if (i == 1)
                        first_page->private = (unsigned long)page;
                if (i >= 1)
                        page->first_page = first_page;
                if (i >= 2)
                        list_add(&page->lru, &prev_page->lru);
                if (i == class->zspage_order - 1)	/* last page */
                        set_bit(PG_private_2, &page->flags);

                prev_page = page;
        }

        init_zspage(first_page, class);

        first_page->freelist = obj_location_to_handle(first_page, 0);
        /* Maximum number of objects we can store in this zspage */
        first_page->objects = class->zspage_order * PAGE_SIZE / class->size;

        error = 0;	/* Success */

cleanup:
        if (unlikely(error) && first_page) {
                free_zspage(first_page);
                first_page = NULL;
        }

        return first_page;
}
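
/*
 * Illustration (not in the original source) of the links set up above
 * for a 3-page zspage:
 *
 *	first_page (PG_private)
 *	  ->private ---> page1 <--lru--> page2 (PG_private_2)
 *	page1->first_page == page2->first_page == first_page
 */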

static struct page *find_get_zspage(struct size_class *class)
{
        int i;
        struct page *page;

        for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
                page = class->fullness_list[i];
                if (page)
                        break;
        }

        return page;
}

/*
 * If this becomes a separate module, register zs_init() with
 * module_init(), zs_exit with module_exit(), and remove zs_initialized
 */
static int zs_initialized;

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
                                void *pcpu)
{
        int cpu = (long)pcpu;
        struct mapping_area *area;

        switch (action) {
        case CPU_UP_PREPARE:
                area = &per_cpu(zs_map_area, cpu);
                if (area->vm)
                        break;
                area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
                if (!area->vm)
                        return notifier_from_errno(-ENOMEM);
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                area = &per_cpu(zs_map_area, cpu);
                if (area->vm)
                        free_vm_area(area->vm);
                area->vm = NULL;
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
        .notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
        unregister_cpu_notifier(&zs_cpu_nb);
}

static int zs_init(void)
{
        int cpu, ret;

        register_cpu_notifier(&zs_cpu_nb);
        for_each_online_cpu(cpu) {
                ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
                if (notifier_to_errno(ret))
                        goto fail;
        }
        return 0;
fail:
        zs_exit();
        return notifier_to_errno(ret);
}

struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
{
        int i, error, ovhd_size;
        struct zs_pool *pool;

        if (!name)
                return NULL;

        ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
        pool = kzalloc(ovhd_size, GFP_KERNEL);
        if (!pool)
                return NULL;

        for (i = 0; i < ZS_SIZE_CLASSES; i++) {
                int size;
                struct size_class *class;

                size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
                if (size > ZS_MAX_ALLOC_SIZE)
                        size = ZS_MAX_ALLOC_SIZE;

                class = &pool->size_class[i];
                class->size = size;
                class->index = i;
                spin_lock_init(&class->lock);
                class->zspage_order = get_zspage_order(size);
        }

        /*
         * If this becomes a separate module, register zs_init with
         * module_init, and remove this block
         */
        if (!zs_initialized) {
                error = zs_init();
                if (error)
                        goto cleanup;
                zs_initialized = 1;
        }

        pool->flags = flags;
        pool->name = name;

        error = 0;	/* Success */

cleanup:
        if (error) {
                zs_destroy_pool(pool);
                pool = NULL;
        }

        return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
        int i;

        for (i = 0; i < ZS_SIZE_CLASSES; i++) {
                int fg;
                struct size_class *class = &pool->size_class[i];

                for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
                        if (class->fullness_list[fg]) {
                                pr_info("Freeing non-empty class with size %db, fullness group %d\n",
                                        class->size, fg);
                        }
                }
        }
        kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, returns an opaque handle encoding the location of the
 * allocated block; the block must be mapped with zs_map_object() before
 * it can be accessed. On failure, NULL is returned.
 *
 * Allocation requests with size == 0 or size > ZS_MAX_ALLOC_SIZE will fail.
 */
void *zs_malloc(struct zs_pool *pool, size_t size)
{
        void *obj;
        struct link_free *link;
        int class_idx;
        struct size_class *class;

        struct page *first_page, *m_page;
        unsigned long m_objidx, m_offset;

        if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
                return NULL;

        class_idx = get_size_class_index(size);
        class = &pool->size_class[class_idx];
        BUG_ON(class_idx != class->index);

        spin_lock(&class->lock);
        first_page = find_get_zspage(class);

        if (!first_page) {
                spin_unlock(&class->lock);
                first_page = alloc_zspage(class, pool->flags);
                if (unlikely(!first_page))
                        return NULL;

                set_zspage_mapping(first_page, class->index, ZS_EMPTY);
                spin_lock(&class->lock);
                class->pages_allocated += class->zspage_order;
        }

        obj = first_page->freelist;
        obj_handle_to_location(obj, &m_page, &m_objidx);
        m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

        link = (struct link_free *)kmap_atomic(m_page) +
                                        m_offset / sizeof(*link);
        first_page->freelist = link->next;
        memset(link, POISON_INUSE, sizeof(*link));
        kunmap_atomic(link);

        first_page->inuse++;
        /* Now move the zspage to another fullness group, if required */
        fix_fullness_group(pool, first_page);
        spin_unlock(&class->lock);

        return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, void *obj)
{
        struct link_free *link;
        struct page *first_page, *f_page;
        unsigned long f_objidx, f_offset;

        unsigned int class_idx;
        struct size_class *class;
        enum fullness_group fullness;

        if (unlikely(!obj))
                return;

        obj_handle_to_location(obj, &f_page, &f_objidx);
        first_page = get_first_page(f_page);

        get_zspage_mapping(first_page, &class_idx, &fullness);
        class = &pool->size_class[class_idx];
        f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

        spin_lock(&class->lock);

        /* Insert this object in containing zspage's freelist */
        link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
                                                        + f_offset);
        link->next = first_page->freelist;
        kunmap_atomic(link);
        first_page->freelist = obj;

        first_page->inuse--;
        fullness = fix_fullness_group(pool, first_page);

        if (fullness == ZS_EMPTY)
                class->pages_allocated -= class->zspage_order;

        spin_unlock(&class->lock);

        if (fullness == ZS_EMPTY)
                free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);

void *zs_map_object(struct zs_pool *pool, void *handle)
{
        struct page *page;
        unsigned long obj_idx, off;

        unsigned int class_idx;
        enum fullness_group fg;
        struct size_class *class;
        struct mapping_area *area;

        BUG_ON(!handle);

        obj_handle_to_location(handle, &page, &obj_idx);
        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);

        area = &get_cpu_var(zs_map_area);
        if (off + class->size <= PAGE_SIZE) {
                /* this object is contained entirely within a page */
                area->vm_addr = kmap_atomic(page);
        } else {
                /* this object spans two pages */
                struct page *nextp;

                nextp = get_next_page(page);
                BUG_ON(!nextp);

                set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
                set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));

                /* We pre-allocated VM area so mapping can never fail */
                area->vm_addr = area->vm->addr;
        }

        return area->vm_addr + off;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, void *handle)
{
        struct page *page;
        unsigned long obj_idx, off;

        unsigned int class_idx;
        enum fullness_group fg;
        struct size_class *class;
        struct mapping_area *area;

        BUG_ON(!handle);

        obj_handle_to_location(handle, &page, &obj_idx);
        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);

        area = &__get_cpu_var(zs_map_area);
        if (off + class->size <= PAGE_SIZE) {
                kunmap_atomic(area->vm_addr);
        } else {
                set_pte(area->vm_ptes[0], __pte(0));
                set_pte(area->vm_ptes[1], __pte(0));
                __flush_tlb_one((unsigned long)area->vm_addr);
                __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
        }
        put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
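
/*
 * Typical use of the exported API (illustrative sketch only; error
 * handling is trimmed and the "my_pool", "src" and "len" names are
 * made up for the example):
 *
 *	struct zs_pool *pool = zs_create_pool("my_pool", GFP_KERNEL);
 *	void *handle = zs_malloc(pool, len);
 *	void *buf = zs_map_object(pool, handle);
 *	memcpy(buf, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 *
 * zs_map_object() disables preemption via get_cpu_var(), so the caller
 * must not sleep before the matching zs_unmap_object().
 */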

u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
        int i;
        u64 npages = 0;

        for (i = 0; i < ZS_SIZE_CLASSES; i++)
                npages += pool->size_class[i].pages_allocated;

        return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);