// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <[email protected]>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * in the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; accordingly
 * there will be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

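/*
 * Worked example of the chunk arithmetic above (illustrative only, assuming
 * 4K pages, i.e. PAGE_SHIFT == 12): CHUNK_SHIFT = 12 - 6 = 6, so
 * CHUNK_SIZE = 64 bytes and TOTAL_CHUNKS = 64. NCHUNKS is then
 * 64 - ZHDR_CHUNKS, where ZHDR_CHUNKS depends on how many 64-byte chunks
 * sizeof(struct z3fold_header) rounds up to for a given configuration.
 */
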
/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)
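
/*
 * Note (derived from the accessors below): a non-headless handle is the
 * address of one of the slot[] entries above. Since the slots structure is
 * SLOTS_ALIGN (0x40) aligned, handle_to_slots() recovers it by masking the
 * low address bits, and the low HANDLE_FLAG_MASK bits of ->pool carry flags
 * (see enum z3fold_handle_flags) while the rest is the pool back pointer.
 */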

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	number of this page's objects still reachable through
 *			handles that live in another page's slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_ORPHANED = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

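/*
 * For instance (assuming 64-byte chunks, i.e. 4K pages): a 100-byte
 * allocation is rounded up by size_to_chunks() to (100 + 63) >> 6 = 2 chunks.
 */
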
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

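/*
 * Locking note: writers of a slot take the page lock first and the slots
 * rwlock second (see __encode_handle() below). __get_z3fold_header() takes
 * the rwlock first, so it may only _trylock_ the page lock and retry via
 * cpu_relax() when a locked header is requested, to avoid a deadlock.
 */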
static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
							bool lock)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			if (lock)
				locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (lock);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, false);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	zhdr = handle_to_z3fold_header(handle);
	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;
	write_unlock(&slots->lock);
	if (zhdr->slots == slots)
		return; /* simple case, nothing else to do */

	/* we are freeing a foreign handle if we are here */
	zhdr->foreign_handles--;
	is_free = true;
	read_lock(&slots->lock);
	if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
		read_unlock(&slots->lock);
		return;
	}
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	read_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

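/*
 * The pseudo filesystem above exists only to provide each pool with an
 * inode whose address_space carries z3fold_aops: that is what
 * __SetPageMovable() (see z3fold_alloc() below) needs so the page
 * migration core can call back into z3fold_page_isolate() and friends.
 */
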
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->foreign_handles = 0;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

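/*
 * Layout of an encoded slot value, for reference: the page-aligned zhdr
 * address occupies the high bits, the low BUDDY_MASK bits carry the buddy
 * index produced by __idx(), and for a LAST buddy the object size in chunks
 * is stored from bit BUDDY_SHIFT upwards so that handle_to_chunks() below
 * can recover it without dereferencing the header.
 */
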
/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	bool is_free = true;
	int i;

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	/* If there are no foreign handles, free the handles array */
	read_lock(&zhdr->slots->lock);
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (zhdr->slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	if (!is_free)
		set_bit(HANDLES_ORPHANED, &zhdr->slots->pool);
	read_unlock(&zhdr->slots->lock);

	if (is_free)
		kmem_cache_free(pool->c_handle, zhdr->slots);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

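/*
 * Worked example for num_free_chunks() (illustrative, assuming 4K pages
 * and, for simplicity, ZHDR_CHUNKS == 1): a page holding only a middle
 * object of 10 chunks at start_middle == 20 has nfree_before == 19 and
 * nfree_after == 64 - 30 == 34, so it is filed on unbuddied list 34.
 */
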
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

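/*
 * If the page holds a single buddy and that buddy's data can be moved into
 * an existing unbuddied page, do so: the old slot is rewritten in place to
 * point into the destination page (becoming a "foreign" handle there), and
 * the donor page becomes empty so do_compact_page() can release it.
 */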
static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		if (new_zhdr->first_chunks == 0) {
			if (new_zhdr->middle_chunks != 0 &&
					chunks >= new_zhdr->start_middle) {
				new_bud = LAST;
			} else {
				new_bud = FIRST;
			}
		} else if (new_zhdr->last_chunks == 0) {
			new_bud = LAST;
		} else if (new_zhdr->middle_chunks == 0) {
			new_bud = MIDDLE;
		}
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

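/*
 * Note on the first_num++ above: a handle stores its buddy index as
 * (bud + first_num) & BUDDY_MASK, so after the middle object has been moved
 * into the first slot, bumping first_num makes the existing MIDDLE handle
 * decode as FIRST in handle_to_buddy() without rewriting the handle.
 */
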
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_CLAIMED, &page->private) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else
			z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

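/*
 * Lookup note for __z3fold_alloc() below: the per-CPU unbuddied lists are
 * indexed by the number of free chunks, so starting the scan at
 * unbuddied[chunks] yields a best fit -- the first non-empty list holds
 * pages whose free region is the smallest that still takes the request.
 * Other CPUs' lists are only searched for an exact-size match to keep
 * cross-CPU contention low.
 */
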
/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size is too large for a z3fold
 * page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

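/*
 * Sizing note for the HEADLESS cutoff above (illustrative, assuming 4K
 * pages, i.e. 64-byte chunks): requests bigger than
 * PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE (e.g. 4096 - 128 - 64 = 3904
 * bytes if the header rounds up to two chunks) leave no room for a second
 * buddy, so they get a raw "headless" page with no z3fold_header at all.
 */
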
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}

	if (!page_claimed)
		free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page was already claimed by reclaim, not by us */
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				page = NULL;
				continue;
			}

			if (unlikely(PageIsolated(page))) {
				clear_bit(PAGE_CLAIMED, &page->private);
				page = NULL;
				continue;
			}
			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				clear_bit(PAGE_CLAIMED, &page->private);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			if (zhdr->foreign_handles) {
				clear_bit(PAGE_CLAIMED, &page->private);
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}
		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
			free_handle(middle_handle);
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
			free_handle(first_handle);
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
			free_handle(last_handle);
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

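/*
 * Minimal sketch of an eviction handler satisfying the contract described
 * above (illustrative only -- the handler and its write-back step are
 * hypothetical; real users such as zswap hook this up via the zpool API):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *data = z3fold_map(pool, handle);
 *		int err = my_writeback(data);	// hypothetical write-back
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;	// non-zero: page goes back on the LRU
 *		z3fold_free(pool, handle);	// required on success
 *		return 0;
 *	}
 */
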
1480 | /** | |
1481 | * z3fold_map() - maps the allocation associated with the given handle | |
1482 | * @pool: pool in which the allocation resides | |
1483 | * @handle: handle associated with the allocation to be mapped | |
1484 | * | |
1485 | * Extracts the buddy number from handle and constructs the pointer to the | |
1486 | * correct starting chunk within the page. | |
1487 | * | |
1488 | * Returns: a pointer to the mapped allocation | |
1489 | */ | |
1490 | static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) | |
1491 | { | |
1492 | struct z3fold_header *zhdr; | |
1493 | struct page *page; | |
1494 | void *addr; | |
1495 | enum buddy buddy; | |
1496 | ||
4a3ac931 | 1497 | zhdr = get_z3fold_header(handle); |
9a001fc1 VW |
1498 | addr = zhdr; |
1499 | page = virt_to_page(zhdr); | |
1500 | ||
1501 | if (test_bit(PAGE_HEADLESS, &page->private)) | |
1502 | goto out; | |
1503 | ||
1504 | buddy = handle_to_buddy(handle); | |
1505 | switch (buddy) { | |
1506 | case FIRST: | |
1507 | addr += ZHDR_SIZE_ALIGNED; | |
1508 | break; | |
1509 | case MIDDLE: | |
1510 | addr += zhdr->start_middle << CHUNK_SHIFT; | |
1511 | set_bit(MIDDLE_CHUNK_MAPPED, &page->private); | |
1512 | break; | |
1513 | case LAST: | |
ca0246bb | 1514 | addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT); |
9a001fc1 VW |
1515 | break; |
1516 | default: | |
1517 | pr_err("unknown buddy id %d\n", buddy); | |
1518 | WARN_ON(1); | |
1519 | addr = NULL; | |
1520 | break; | |
1521 | } | |
2f1e5e4d | 1522 | |
1f862989 VW |
1523 | if (addr) |
1524 | zhdr->mapped_count++; | |
9a001fc1 | 1525 | out: |
4a3ac931 | 1526 | put_z3fold_header(zhdr); |
9a001fc1 VW |
1527 | return addr; |
1528 | } | |
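/*
 * A minimal usage sketch (buf and len are illustrative, not part of this
 * file): every successful z3fold_map() must be paired with a
 * z3fold_unmap() on the same handle, so that mapped_count stays balanced
 * and a MIDDLE mapping does not block compaction indefinitely:
 *
 *	void *addr = z3fold_map(pool, handle);
 *	if (addr) {
 *		memcpy(buf, addr, len);
 *		z3fold_unmap(pool, handle);
 *	}
 */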
1529 | ||
1530 | /** | |
1531 | * z3fold_unmap() - unmaps the allocation associated with the given handle | |
1532 | * @pool: pool in which the allocation resides | |
1533 | * @handle: handle associated with the allocation to be unmapped | |
1534 | */ | |
1535 | static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) | |
1536 | { | |
1537 | struct z3fold_header *zhdr; | |
1538 | struct page *page; | |
1539 | enum buddy buddy; | |
1540 | ||
4a3ac931 | 1541 | zhdr = get_z3fold_header(handle); |
9a001fc1 VW |
1542 | page = virt_to_page(zhdr); |
1543 | ||
2f1e5e4d | 1544 | if (test_bit(PAGE_HEADLESS, &page->private)) |
9a001fc1 | 1545 | return; |
9a001fc1 VW |
1546 | |
1547 | buddy = handle_to_buddy(handle); | |
1548 | if (buddy == MIDDLE) | |
1549 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); | |
1f862989 | 1550 | zhdr->mapped_count--; |
4a3ac931 | 1551 | put_z3fold_header(zhdr); |
9a001fc1 VW |
1552 | } |
1553 | ||
1554 | /** | |
1555 | * z3fold_get_pool_size() - gets the z3fold pool size in pages | |
1556 | * @pool: pool whose size is being queried | |
1557 | * | |
12d59ae6 | 1558 | * Returns: size in pages of the given pool. |
9a001fc1 VW |
1559 | */ |
1560 | static u64 z3fold_get_pool_size(struct z3fold_pool *pool) | |
1561 | { | |
12d59ae6 | 1562 | return atomic64_read(&pool->pages_nr); |
9a001fc1 VW |
1563 | } |
1564 | ||
1f862989 VW |
1565 | static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) |
1566 | { | |
1567 | struct z3fold_header *zhdr; | |
1568 | struct z3fold_pool *pool; | |
1569 | ||
1570 | VM_BUG_ON_PAGE(!PageMovable(page), page); | |
1571 | VM_BUG_ON_PAGE(PageIsolated(page), page); | |
1572 | ||
3f9d2b57 VW |
1573 | if (test_bit(PAGE_HEADLESS, &page->private) || |
1574 | test_bit(PAGE_CLAIMED, &page->private)) | |
1f862989 VW |
1575 | return false; |
1576 | ||
1577 | zhdr = page_address(page); | |
1578 | z3fold_page_lock(zhdr); | |
1579 | if (test_bit(NEEDS_COMPACTING, &page->private) || | |
1580 | test_bit(PAGE_STALE, &page->private)) | |
1581 | goto out; | |
1582 | ||
4a3ac931 VW |
1583 | if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) |
1584 | goto out; | |
1585 | ||
1f862989 | 1586 | pool = zhdr_to_pool(zhdr); |
4a3ac931 VW |
1587 | spin_lock(&pool->lock); |
1588 | if (!list_empty(&zhdr->buddy)) | |
1589 | list_del_init(&zhdr->buddy); | |
1590 | if (!list_empty(&page->lru)) | |
1591 | list_del_init(&page->lru); | |
1592 | spin_unlock(&pool->lock); | |
1593 | ||
1594 | kref_get(&zhdr->refcount); | |
1595 | z3fold_page_unlock(zhdr); | |
1596 | return true; | |
1f862989 | 1597 | |
1f862989 VW |
1598 | out: |
1599 | z3fold_page_unlock(zhdr); | |
1600 | return false; | |
1601 | } | |
1602 | ||
1603 | static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage, | |
1604 | struct page *page, enum migrate_mode mode) | |
1605 | { | |
1606 | struct z3fold_header *zhdr, *new_zhdr; | |
1607 | struct z3fold_pool *pool; | |
1608 | struct address_space *new_mapping; | |
1609 | ||
1610 | VM_BUG_ON_PAGE(!PageMovable(page), page); | |
1611 | VM_BUG_ON_PAGE(!PageIsolated(page), page); | |
810481a2 | 1612 | VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); |
1f862989 VW |
1613 | |
1614 | zhdr = page_address(page); | |
1615 | pool = zhdr_to_pool(zhdr); | |
1616 | ||
1f862989 | 1617 | if (!z3fold_page_trylock(zhdr)) { |
1f862989 VW |
1618 | return -EAGAIN; |
1619 | } | |
4a3ac931 | 1620 | if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { |
1f862989 | 1621 | z3fold_page_unlock(zhdr); |
1f862989 VW |
1622 | return -EBUSY; |
1623 | } | |
c92d2f38 HB |
1624 | if (work_pending(&zhdr->work)) { |
1625 | z3fold_page_unlock(zhdr); | |
1626 | return -EAGAIN; | |
1627 | } | |
1f862989 VW |
1628 | new_zhdr = page_address(newpage); |
1629 | memcpy(new_zhdr, zhdr, PAGE_SIZE); | |
1630 | newpage->private = page->private; | |
1631 | page->private = 0; | |
1632 | z3fold_page_unlock(zhdr); | |
1633 | spin_lock_init(&new_zhdr->page_lock); | |
c92d2f38 HB |
1634 | INIT_WORK(&new_zhdr->work, compact_page_work); |
1635 | /* | |
1636 | * z3fold_page_isolate() ensures that new_zhdr->buddy is empty, | |
1637 | * so we only have to reinitialize it. | |
1638 | */ | |
1639 | INIT_LIST_HEAD(&new_zhdr->buddy); | |
1f862989 VW |
1640 | new_mapping = page_mapping(page); |
1641 | __ClearPageMovable(page); | |
1642 | ClearPagePrivate(page); | |
1643 | ||
1644 | get_page(newpage); | |
1645 | z3fold_page_lock(new_zhdr); | |
1646 | if (new_zhdr->first_chunks) | |
1647 | encode_handle(new_zhdr, FIRST); | |
1648 | if (new_zhdr->last_chunks) | |
1649 | encode_handle(new_zhdr, LAST); | |
1650 | if (new_zhdr->middle_chunks) | |
1651 | encode_handle(new_zhdr, MIDDLE); | |
1652 | set_bit(NEEDS_COMPACTING, &newpage->private); | |
1653 | new_zhdr->cpu = smp_processor_id(); | |
1654 | spin_lock(&pool->lock); | |
1655 | list_add(&newpage->lru, &pool->lru); | |
1656 | spin_unlock(&pool->lock); | |
1657 | __SetPageMovable(newpage, new_mapping); | |
1658 | z3fold_page_unlock(new_zhdr); | |
1659 | ||
1660 | queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); | |
1661 | ||
1662 | page_mapcount_reset(page); | |
1f862989 VW |
1663 | put_page(page); |
1664 | return 0; | |
1665 | } | |
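/*
 * Migration flow, in short: the source page (header included) is copied
 * wholesale into newpage, the per-page lock and work item are
 * re-initialized, the handles are re-encoded so they point into the new
 * page, and a compaction pass is queued on the new page before the old
 * one is released.
 */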
1666 | ||
1667 | static void z3fold_page_putback(struct page *page) | |
1668 | { | |
1669 | struct z3fold_header *zhdr; | |
1670 | struct z3fold_pool *pool; | |
1671 | ||
1672 | zhdr = page_address(page); | |
1673 | pool = zhdr_to_pool(zhdr); | |
1674 | ||
1675 | z3fold_page_lock(zhdr); | |
1676 | if (!list_empty(&zhdr->buddy)) | |
1677 | list_del_init(&zhdr->buddy); | |
1678 | INIT_LIST_HEAD(&page->lru); | |
1679 | if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { | |
1680 | atomic64_dec(&pool->pages_nr); | |
1681 | return; | |
1682 | } | |
1683 | spin_lock(&pool->lock); | |
1684 | list_add(&page->lru, &pool->lru); | |
1685 | spin_unlock(&pool->lock); | |
1686 | z3fold_page_unlock(zhdr); | |
1687 | } | |
1688 | ||
1689 | static const struct address_space_operations z3fold_aops = { | |
1690 | .isolate_page = z3fold_page_isolate, | |
1691 | .migratepage = z3fold_page_migrate, | |
1692 | .putback_page = z3fold_page_putback, | |
1693 | }; | |
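/*
 * These address_space_operations are what make z3fold pages movable:
 * the page migration core invokes ->isolate_page, ->migratepage and
 * ->putback_page on pages that were registered with __SetPageMovable().
 */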
1694 | ||
9a001fc1 VW |
1695 | /***************** |
1696 | * zpool | |
1697 | ****************/ | |
1698 | ||
1699 | static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle) | |
1700 | { | |
1701 | if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) | |
1702 | return pool->zpool_ops->evict(pool->zpool, handle); | |
1703 | else | |
1704 | return -ENOENT; | |
1705 | } | |
1706 | ||
1707 | static const struct z3fold_ops z3fold_zpool_ops = { | |
1708 | .evict = z3fold_zpool_evict | |
1709 | }; | |
1710 | ||
1711 | static void *z3fold_zpool_create(const char *name, gfp_t gfp, | |
1712 | const struct zpool_ops *zpool_ops, | |
1713 | struct zpool *zpool) | |
1714 | { | |
1715 | struct z3fold_pool *pool; | |
1716 | ||
d30561c5 VW |
1717 | pool = z3fold_create_pool(name, gfp, |
1718 | zpool_ops ? &z3fold_zpool_ops : NULL); | |
9a001fc1 VW |
1719 | if (pool) { |
1720 | pool->zpool = zpool; | |
1721 | pool->zpool_ops = zpool_ops; | |
1722 | } | |
1723 | return pool; | |
1724 | } | |
1725 | ||
1726 | static void z3fold_zpool_destroy(void *pool) | |
1727 | { | |
1728 | z3fold_destroy_pool(pool); | |
1729 | } | |
1730 | ||
1731 | static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, | |
1732 | unsigned long *handle) | |
1733 | { | |
1734 | return z3fold_alloc(pool, size, gfp, handle); | |
1735 | } | |
1736 | static void z3fold_zpool_free(void *pool, unsigned long handle) | |
1737 | { | |
1738 | z3fold_free(pool, handle); | |
1739 | } | |
1740 | ||
1741 | static int z3fold_zpool_shrink(void *pool, unsigned int pages, | |
1742 | unsigned int *reclaimed) | |
1743 | { | |
1744 | unsigned int total = 0; | |
1745 | int ret = -EINVAL; | |
1746 | ||
1747 | while (total < pages) { | |
1748 | ret = z3fold_reclaim_page(pool, 8); | |
1749 | if (ret < 0) | |
1750 | break; | |
1751 | total++; | |
1752 | } | |
1753 | ||
1754 | if (reclaimed) | |
1755 | *reclaimed = total; | |
1756 | ||
1757 | return ret; | |
1758 | } | |
1759 | ||
1760 | static void *z3fold_zpool_map(void *pool, unsigned long handle, | |
1761 | enum zpool_mapmode mm) | |
1762 | { | |
1763 | return z3fold_map(pool, handle); | |
1764 | } | |
1765 | static void z3fold_zpool_unmap(void *pool, unsigned long handle) | |
1766 | { | |
1767 | z3fold_unmap(pool, handle); | |
1768 | } | |
1769 | ||
1770 | static u64 z3fold_zpool_total_size(void *pool) | |
1771 | { | |
1772 | return z3fold_get_pool_size(pool) * PAGE_SIZE; | |
1773 | } | |
1774 | ||
1775 | static struct zpool_driver z3fold_zpool_driver = { | |
1776 | .type = "z3fold", | |
1777 | .owner = THIS_MODULE, | |
1778 | .create = z3fold_zpool_create, | |
1779 | .destroy = z3fold_zpool_destroy, | |
1780 | .malloc = z3fold_zpool_malloc, | |
1781 | .free = z3fold_zpool_free, | |
1782 | .shrink = z3fold_zpool_shrink, | |
1783 | .map = z3fold_zpool_map, | |
1784 | .unmap = z3fold_zpool_unmap, | |
1785 | .total_size = z3fold_zpool_total_size, | |
1786 | }; | |
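/*
 * A hedged usage sketch: consumers normally reach this driver through
 * the generic zpool API rather than by calling z3fold functions
 * directly. The pool name, ops pointer and allocation size below are
 * illustrative only:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (zp && !zpool_malloc(zp, 1024, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *		... write compressed data to dst ...
 *		zpool_unmap_handle(zp, handle);
 *	}
 */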
1787 | ||
1788 | MODULE_ALIAS("zpool-z3fold"); | |
1789 | ||
1790 | static int __init init_z3fold(void) | |
1791 | { | |
1f862989 VW |
1792 | int ret; |
1793 | ||
ede93213 VW |
1794 | /* Make sure the z3fold header is not larger than the page size */ |
1795 | BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE); | |
1f862989 VW |
1796 | ret = z3fold_mount(); |
1797 | if (ret) | |
1798 | return ret; | |
1799 | ||
9a001fc1 VW |
1800 | zpool_register_driver(&z3fold_zpool_driver); |
1801 | ||
1802 | return 0; | |
1803 | } | |
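/*
 * Init order note: the pseudo filesystem backing movable z3fold pages is
 * mounted before the zpool driver is registered, so pools created right
 * after registration can already mark their pages movable.
 */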
1804 | ||
1805 | static void __exit exit_z3fold(void) | |
1806 | { | |
1f862989 | 1807 | z3fold_unmount(); |
9a001fc1 VW |
1808 | zpool_unregister_driver(&z3fold_zpool_driver); |
1809 | } | |
1810 | ||
1811 | module_init(init_z3fold); | |
1812 | module_exit(exit_z3fold); | |
1813 | ||
1814 | MODULE_LICENSE("GPL"); | |
1815 | MODULE_AUTHOR("Vitaly Wool <[email protected]>"); | |
1816 | MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages"); |