]> Git Repo - linux.git/blob - mm/swapfile.c
crypto: akcipher - Drop sign/verify operations
[linux.git] / mm / swapfile.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/swapfile.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *  Swap reorganised 29.12.95, Stephen Tweedie
7  */
8
9 #include <linux/blkdev.h>
10 #include <linux/mm.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/task.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mman.h>
15 #include <linux/slab.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/swap.h>
18 #include <linux/vmalloc.h>
19 #include <linux/pagemap.h>
20 #include <linux/namei.h>
21 #include <linux/shmem_fs.h>
22 #include <linux/blk-cgroup.h>
23 #include <linux/random.h>
24 #include <linux/writeback.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/init.h>
28 #include <linux/ksm.h>
29 #include <linux/rmap.h>
30 #include <linux/security.h>
31 #include <linux/backing-dev.h>
32 #include <linux/mutex.h>
33 #include <linux/capability.h>
34 #include <linux/syscalls.h>
35 #include <linux/memcontrol.h>
36 #include <linux/poll.h>
37 #include <linux/oom.h>
38 #include <linux/swapfile.h>
39 #include <linux/export.h>
40 #include <linux/swap_slots.h>
41 #include <linux/sort.h>
42 #include <linux/completion.h>
43 #include <linux/suspend.h>
44 #include <linux/zswap.h>
45 #include <linux/plist.h>
46
47 #include <asm/tlbflush.h>
48 #include <linux/swapops.h>
49 #include <linux/swap_cgroup.h>
50 #include "internal.h"
51 #include "swap.h"
52
53 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
54                                  unsigned char);
55 static void free_swap_count_continuations(struct swap_info_struct *);
56 static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
57                                   unsigned int nr_pages);
58 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
59                              unsigned int nr_entries);
60 static bool folio_swapcache_freeable(struct folio *folio);
61 static struct swap_cluster_info *lock_cluster_or_swap_info(
62                 struct swap_info_struct *si, unsigned long offset);
63 static void unlock_cluster_or_swap_info(struct swap_info_struct *si,
64                                         struct swap_cluster_info *ci);
65
66 static DEFINE_SPINLOCK(swap_lock);
67 static unsigned int nr_swapfiles;
68 atomic_long_t nr_swap_pages;
69 /*
70  * Some modules use swappable objects and may try to swap them out under
71  * memory pressure (via the shrinker). Before doing so, they may wish to
72  * check to see if any swap space is available.
73  */
74 EXPORT_SYMBOL_GPL(nr_swap_pages);
75 /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
76 long total_swap_pages;
77 static int least_priority = -1;
78 unsigned long swapfile_maximum_size;
79 #ifdef CONFIG_MIGRATION
80 bool swap_migration_ad_supported;
81 #endif  /* CONFIG_MIGRATION */
82
83 static const char Bad_file[] = "Bad swap file entry ";
84 static const char Unused_file[] = "Unused swap file entry ";
85 static const char Bad_offset[] = "Bad swap offset entry ";
86 static const char Unused_offset[] = "Unused swap offset entry ";
87
88 /*
89  * all active swap_info_structs
90  * protected with swap_lock, and ordered by priority.
91  */
92 static PLIST_HEAD(swap_active_head);
93
94 /*
95  * all available (active, not full) swap_info_structs
96  * protected with swap_avail_lock, ordered by priority.
97  * This is used by folio_alloc_swap() instead of swap_active_head
98  * because swap_active_head includes all swap_info_structs,
99  * but folio_alloc_swap() doesn't need to look at full ones.
100  * This uses its own lock instead of swap_lock because when a
101  * swap_info_struct changes between not-full/full, it needs to
102  * add/remove itself to/from this list, but the swap_info_struct->lock
103  * is held and the locking order requires swap_lock to be taken
104  * before any swap_info_struct->lock.
105  */
106 static struct plist_head *swap_avail_heads;
107 static DEFINE_SPINLOCK(swap_avail_lock);
108
109 static struct swap_info_struct *swap_info[MAX_SWAPFILES];
110
111 static DEFINE_MUTEX(swapon_mutex);
112
113 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
114 /* Activity counter to indicate that a swapon or swapoff has occurred */
115 static atomic_t proc_poll_event = ATOMIC_INIT(0);
116
117 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
118
119 static struct swap_info_struct *swap_type_to_swap_info(int type)
120 {
121         if (type >= MAX_SWAPFILES)
122                 return NULL;
123
124         return READ_ONCE(swap_info[type]); /* rcu_dereference() */
125 }
126
127 static inline unsigned char swap_count(unsigned char ent)
128 {
129         return ent & ~SWAP_HAS_CACHE;   /* may include COUNT_CONTINUED flag */
130 }
131
132 /* Reclaim the swap entry anyway if possible */
133 #define TTRS_ANYWAY             0x1
134 /*
135  * Reclaim the swap entry if there are no more mappings of the
136  * corresponding page
137  */
138 #define TTRS_UNMAPPED           0x2
139 /* Reclaim the swap entry if swap is getting full */
140 #define TTRS_FULL               0x4
141 /* Reclaim directly, bypass the slot cache and don't touch device lock */
142 #define TTRS_DIRECT             0x8
143
144 static bool swap_is_has_cache(struct swap_info_struct *si,
145                               unsigned long offset, int nr_pages)
146 {
147         unsigned char *map = si->swap_map + offset;
148         unsigned char *map_end = map + nr_pages;
149
150         do {
151                 VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
152                 if (*map != SWAP_HAS_CACHE)
153                         return false;
154         } while (++map < map_end);
155
156         return true;
157 }
158
159 static bool swap_is_last_map(struct swap_info_struct *si,
160                 unsigned long offset, int nr_pages, bool *has_cache)
161 {
162         unsigned char *map = si->swap_map + offset;
163         unsigned char *map_end = map + nr_pages;
164         unsigned char count = *map;
165
166         if (swap_count(count) != 1)
167                 return false;
168
169         while (++map < map_end) {
170                 if (*map != count)
171                         return false;
172         }
173
174         *has_cache = !!(count & SWAP_HAS_CACHE);
175         return true;
176 }
177
178 /*
179  * returns number of pages in the folio that backs the swap entry. If positive,
180  * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no
181  * folio was associated with the swap entry.
182  */
183 static int __try_to_reclaim_swap(struct swap_info_struct *si,
184                                  unsigned long offset, unsigned long flags)
185 {
186         swp_entry_t entry = swp_entry(si->type, offset);
187         struct address_space *address_space = swap_address_space(entry);
188         struct swap_cluster_info *ci;
189         struct folio *folio;
190         int ret, nr_pages;
191         bool need_reclaim;
192
193         folio = filemap_get_folio(address_space, swap_cache_index(entry));
194         if (IS_ERR(folio))
195                 return 0;
196
197         /* offset could point to the middle of a large folio */
198         entry = folio->swap;
199         offset = swp_offset(entry);
200         nr_pages = folio_nr_pages(folio);
201         ret = -nr_pages;
202
203         /*
204          * When this function is called from scan_swap_map_slots() and it's
205          * called by vmscan.c at reclaiming folios. So we hold a folio lock
206          * here. We have to use trylock for avoiding deadlock. This is a special
207          * case and you should use folio_free_swap() with explicit folio_lock()
208          * in usual operations.
209          */
210         if (!folio_trylock(folio))
211                 goto out;
212
213         need_reclaim = ((flags & TTRS_ANYWAY) ||
214                         ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
215                         ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
216         if (!need_reclaim || !folio_swapcache_freeable(folio))
217                 goto out_unlock;
218
219         /*
220          * It's safe to delete the folio from swap cache only if the folio's
221          * swap_map is HAS_CACHE only, which means the slots have no page table
222          * reference or pending writeback, and can't be allocated to others.
223          */
224         ci = lock_cluster_or_swap_info(si, offset);
225         need_reclaim = swap_is_has_cache(si, offset, nr_pages);
226         unlock_cluster_or_swap_info(si, ci);
227         if (!need_reclaim)
228                 goto out_unlock;
229
230         if (!(flags & TTRS_DIRECT)) {
231                 /* Free through slot cache */
232                 delete_from_swap_cache(folio);
233                 folio_set_dirty(folio);
234                 ret = nr_pages;
235                 goto out_unlock;
236         }
237
238         xa_lock_irq(&address_space->i_pages);
239         __delete_from_swap_cache(folio, entry, NULL);
240         xa_unlock_irq(&address_space->i_pages);
241         folio_ref_sub(folio, nr_pages);
242         folio_set_dirty(folio);
243
244         spin_lock(&si->lock);
245         /* Only sinple page folio can be backed by zswap */
246         if (nr_pages == 1)
247                 zswap_invalidate(entry);
248         swap_entry_range_free(si, entry, nr_pages);
249         spin_unlock(&si->lock);
250         ret = nr_pages;
251 out_unlock:
252         folio_unlock(folio);
253 out:
254         folio_put(folio);
255         return ret;
256 }
257
258 static inline struct swap_extent *first_se(struct swap_info_struct *sis)
259 {
260         struct rb_node *rb = rb_first(&sis->swap_extent_root);
261         return rb_entry(rb, struct swap_extent, rb_node);
262 }
263
264 static inline struct swap_extent *next_se(struct swap_extent *se)
265 {
266         struct rb_node *rb = rb_next(&se->rb_node);
267         return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
268 }
269
270 /*
271  * swapon tell device that all the old swap contents can be discarded,
272  * to allow the swap device to optimize its wear-levelling.
273  */
274 static int discard_swap(struct swap_info_struct *si)
275 {
276         struct swap_extent *se;
277         sector_t start_block;
278         sector_t nr_blocks;
279         int err = 0;
280
281         /* Do not discard the swap header page! */
282         se = first_se(si);
283         start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
284         nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
285         if (nr_blocks) {
286                 err = blkdev_issue_discard(si->bdev, start_block,
287                                 nr_blocks, GFP_KERNEL);
288                 if (err)
289                         return err;
290                 cond_resched();
291         }
292
293         for (se = next_se(se); se; se = next_se(se)) {
294                 start_block = se->start_block << (PAGE_SHIFT - 9);
295                 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
296
297                 err = blkdev_issue_discard(si->bdev, start_block,
298                                 nr_blocks, GFP_KERNEL);
299                 if (err)
300                         break;
301
302                 cond_resched();
303         }
304         return err;             /* That will often be -EOPNOTSUPP */
305 }
306
307 static struct swap_extent *
308 offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
309 {
310         struct swap_extent *se;
311         struct rb_node *rb;
312
313         rb = sis->swap_extent_root.rb_node;
314         while (rb) {
315                 se = rb_entry(rb, struct swap_extent, rb_node);
316                 if (offset < se->start_page)
317                         rb = rb->rb_left;
318                 else if (offset >= se->start_page + se->nr_pages)
319                         rb = rb->rb_right;
320                 else
321                         return se;
322         }
323         /* It *must* be present */
324         BUG();
325 }
326
327 sector_t swap_folio_sector(struct folio *folio)
328 {
329         struct swap_info_struct *sis = swp_swap_info(folio->swap);
330         struct swap_extent *se;
331         sector_t sector;
332         pgoff_t offset;
333
334         offset = swp_offset(folio->swap);
335         se = offset_to_swap_extent(sis, offset);
336         sector = se->start_block + (offset - se->start_page);
337         return sector << (PAGE_SHIFT - 9);
338 }
339
340 /*
341  * swap allocation tell device that a cluster of swap can now be discarded,
342  * to allow the swap device to optimize its wear-levelling.
343  */
344 static void discard_swap_cluster(struct swap_info_struct *si,
345                                  pgoff_t start_page, pgoff_t nr_pages)
346 {
347         struct swap_extent *se = offset_to_swap_extent(si, start_page);
348
349         while (nr_pages) {
350                 pgoff_t offset = start_page - se->start_page;
351                 sector_t start_block = se->start_block + offset;
352                 sector_t nr_blocks = se->nr_pages - offset;
353
354                 if (nr_blocks > nr_pages)
355                         nr_blocks = nr_pages;
356                 start_page += nr_blocks;
357                 nr_pages -= nr_blocks;
358
359                 start_block <<= PAGE_SHIFT - 9;
360                 nr_blocks <<= PAGE_SHIFT - 9;
361                 if (blkdev_issue_discard(si->bdev, start_block,
362                                         nr_blocks, GFP_NOIO))
363                         break;
364
365                 se = next_se(se);
366         }
367 }
368
369 #ifdef CONFIG_THP_SWAP
370 #define SWAPFILE_CLUSTER        HPAGE_PMD_NR
371
372 #define swap_entry_order(order) (order)
373 #else
374 #define SWAPFILE_CLUSTER        256
375
376 /*
377  * Define swap_entry_order() as constant to let compiler to optimize
378  * out some code if !CONFIG_THP_SWAP
379  */
380 #define swap_entry_order(order) 0
381 #endif
382 #define LATENCY_LIMIT           256
383
384 static inline bool cluster_is_free(struct swap_cluster_info *info)
385 {
386         return info->flags & CLUSTER_FLAG_FREE;
387 }
388
389 static inline unsigned int cluster_index(struct swap_info_struct *si,
390                                          struct swap_cluster_info *ci)
391 {
392         return ci - si->cluster_info;
393 }
394
395 static inline unsigned int cluster_offset(struct swap_info_struct *si,
396                                           struct swap_cluster_info *ci)
397 {
398         return cluster_index(si, ci) * SWAPFILE_CLUSTER;
399 }
400
401 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
402                                                      unsigned long offset)
403 {
404         struct swap_cluster_info *ci;
405
406         ci = si->cluster_info;
407         if (ci) {
408                 ci += offset / SWAPFILE_CLUSTER;
409                 spin_lock(&ci->lock);
410         }
411         return ci;
412 }
413
414 static inline void unlock_cluster(struct swap_cluster_info *ci)
415 {
416         if (ci)
417                 spin_unlock(&ci->lock);
418 }
419
420 /*
421  * Determine the locking method in use for this device.  Return
422  * swap_cluster_info if SSD-style cluster-based locking is in place.
423  */
424 static inline struct swap_cluster_info *lock_cluster_or_swap_info(
425                 struct swap_info_struct *si, unsigned long offset)
426 {
427         struct swap_cluster_info *ci;
428
429         /* Try to use fine-grained SSD-style locking if available: */
430         ci = lock_cluster(si, offset);
431         /* Otherwise, fall back to traditional, coarse locking: */
432         if (!ci)
433                 spin_lock(&si->lock);
434
435         return ci;
436 }
437
438 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
439                                                struct swap_cluster_info *ci)
440 {
441         if (ci)
442                 unlock_cluster(ci);
443         else
444                 spin_unlock(&si->lock);
445 }
446
447 /* Add a cluster to discard list and schedule it to do discard */
448 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
449                 struct swap_cluster_info *ci)
450 {
451         unsigned int idx = cluster_index(si, ci);
452         /*
453          * If scan_swap_map_slots() can't find a free cluster, it will check
454          * si->swap_map directly. To make sure the discarding cluster isn't
455          * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
456          * It will be cleared after discard
457          */
458         memset(si->swap_map + idx * SWAPFILE_CLUSTER,
459                         SWAP_MAP_BAD, SWAPFILE_CLUSTER);
460
461         VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
462         list_move_tail(&ci->list, &si->discard_clusters);
463         ci->flags = 0;
464         schedule_work(&si->discard_work);
465 }
466
467 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
468 {
469         lockdep_assert_held(&si->lock);
470         lockdep_assert_held(&ci->lock);
471
472         if (ci->flags)
473                 list_move_tail(&ci->list, &si->free_clusters);
474         else
475                 list_add_tail(&ci->list, &si->free_clusters);
476         ci->flags = CLUSTER_FLAG_FREE;
477         ci->order = 0;
478 }
479
480 /*
481  * Doing discard actually. After a cluster discard is finished, the cluster
482  * will be added to free cluster list. caller should hold si->lock.
483 */
484 static void swap_do_scheduled_discard(struct swap_info_struct *si)
485 {
486         struct swap_cluster_info *ci;
487         unsigned int idx;
488
489         while (!list_empty(&si->discard_clusters)) {
490                 ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
491                 list_del(&ci->list);
492                 idx = cluster_index(si, ci);
493                 spin_unlock(&si->lock);
494
495                 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
496                                 SWAPFILE_CLUSTER);
497
498                 spin_lock(&si->lock);
499                 spin_lock(&ci->lock);
500                 __free_cluster(si, ci);
501                 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
502                                 0, SWAPFILE_CLUSTER);
503                 spin_unlock(&ci->lock);
504         }
505 }
506
507 static void swap_discard_work(struct work_struct *work)
508 {
509         struct swap_info_struct *si;
510
511         si = container_of(work, struct swap_info_struct, discard_work);
512
513         spin_lock(&si->lock);
514         swap_do_scheduled_discard(si);
515         spin_unlock(&si->lock);
516 }
517
518 static void swap_users_ref_free(struct percpu_ref *ref)
519 {
520         struct swap_info_struct *si;
521
522         si = container_of(ref, struct swap_info_struct, users);
523         complete(&si->comp);
524 }
525
526 static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
527 {
528         VM_BUG_ON(ci->count != 0);
529         lockdep_assert_held(&si->lock);
530         lockdep_assert_held(&ci->lock);
531
532         if (ci->flags & CLUSTER_FLAG_FRAG)
533                 si->frag_cluster_nr[ci->order]--;
534
535         /*
536          * If the swap is discardable, prepare discard the cluster
537          * instead of free it immediately. The cluster will be freed
538          * after discard.
539          */
540         if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
541             (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
542                 swap_cluster_schedule_discard(si, ci);
543                 return;
544         }
545
546         __free_cluster(si, ci);
547 }
548
549 /*
550  * The cluster corresponding to page_nr will be used. The cluster will not be
551  * added to free cluster list and its usage counter will be increased by 1.
552  * Only used for initialization.
553  */
554 static void inc_cluster_info_page(struct swap_info_struct *si,
555         struct swap_cluster_info *cluster_info, unsigned long page_nr)
556 {
557         unsigned long idx = page_nr / SWAPFILE_CLUSTER;
558         struct swap_cluster_info *ci;
559
560         if (!cluster_info)
561                 return;
562
563         ci = cluster_info + idx;
564         ci->count++;
565
566         VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
567         VM_BUG_ON(ci->flags);
568 }
569
570 /*
571  * The cluster ci decreases @nr_pages usage. If the usage counter becomes 0,
572  * which means no page in the cluster is in use, we can optionally discard
573  * the cluster and add it to free cluster list.
574  */
575 static void dec_cluster_info_page(struct swap_info_struct *si,
576                                   struct swap_cluster_info *ci, int nr_pages)
577 {
578         if (!si->cluster_info)
579                 return;
580
581         VM_BUG_ON(ci->count < nr_pages);
582         VM_BUG_ON(cluster_is_free(ci));
583         lockdep_assert_held(&si->lock);
584         lockdep_assert_held(&ci->lock);
585         ci->count -= nr_pages;
586
587         if (!ci->count) {
588                 free_cluster(si, ci);
589                 return;
590         }
591
592         if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
593                 VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
594                 if (ci->flags & CLUSTER_FLAG_FRAG)
595                         si->frag_cluster_nr[ci->order]--;
596                 list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]);
597                 ci->flags = CLUSTER_FLAG_NONFULL;
598         }
599 }
600
601 static bool cluster_reclaim_range(struct swap_info_struct *si,
602                                   struct swap_cluster_info *ci,
603                                   unsigned long start, unsigned long end)
604 {
605         unsigned char *map = si->swap_map;
606         unsigned long offset;
607
608         spin_unlock(&ci->lock);
609         spin_unlock(&si->lock);
610
611         for (offset = start; offset < end; offset++) {
612                 switch (READ_ONCE(map[offset])) {
613                 case 0:
614                         continue;
615                 case SWAP_HAS_CACHE:
616                         if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0)
617                                 continue;
618                         goto out;
619                 default:
620                         goto out;
621                 }
622         }
623 out:
624         spin_lock(&si->lock);
625         spin_lock(&ci->lock);
626
627         /*
628          * Recheck the range no matter reclaim succeeded or not, the slot
629          * could have been be freed while we are not holding the lock.
630          */
631         for (offset = start; offset < end; offset++)
632                 if (READ_ONCE(map[offset]))
633                         return false;
634
635         return true;
636 }
637
638 static bool cluster_scan_range(struct swap_info_struct *si,
639                                struct swap_cluster_info *ci,
640                                unsigned long start, unsigned int nr_pages)
641 {
642         unsigned long offset, end = start + nr_pages;
643         unsigned char *map = si->swap_map;
644         bool need_reclaim = false;
645
646         for (offset = start; offset < end; offset++) {
647                 switch (READ_ONCE(map[offset])) {
648                 case 0:
649                         continue;
650                 case SWAP_HAS_CACHE:
651                         if (!vm_swap_full())
652                                 return false;
653                         need_reclaim = true;
654                         continue;
655                 default:
656                         return false;
657                 }
658         }
659
660         if (need_reclaim)
661                 return cluster_reclaim_range(si, ci, start, end);
662
663         return true;
664 }
665
666 static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
667                                 unsigned int start, unsigned char usage,
668                                 unsigned int order)
669 {
670         unsigned int nr_pages = 1 << order;
671
672         if (cluster_is_free(ci)) {
673                 if (nr_pages < SWAPFILE_CLUSTER) {
674                         list_move_tail(&ci->list, &si->nonfull_clusters[order]);
675                         ci->flags = CLUSTER_FLAG_NONFULL;
676                 }
677                 ci->order = order;
678         }
679
680         memset(si->swap_map + start, usage, nr_pages);
681         swap_range_alloc(si, start, nr_pages);
682         ci->count += nr_pages;
683
684         if (ci->count == SWAPFILE_CLUSTER) {
685                 VM_BUG_ON(!(ci->flags &
686                           (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
687                 if (ci->flags & CLUSTER_FLAG_FRAG)
688                         si->frag_cluster_nr[ci->order]--;
689                 list_move_tail(&ci->list, &si->full_clusters);
690                 ci->flags = CLUSTER_FLAG_FULL;
691         }
692 }
693
694 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset,
695                                             unsigned int *foundp, unsigned int order,
696                                             unsigned char usage)
697 {
698         unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1);
699         unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
700         unsigned int nr_pages = 1 << order;
701         struct swap_cluster_info *ci;
702
703         if (end < nr_pages)
704                 return SWAP_NEXT_INVALID;
705         end -= nr_pages;
706
707         ci = lock_cluster(si, offset);
708         if (ci->count + nr_pages > SWAPFILE_CLUSTER) {
709                 offset = SWAP_NEXT_INVALID;
710                 goto done;
711         }
712
713         while (offset <= end) {
714                 if (cluster_scan_range(si, ci, offset, nr_pages)) {
715                         cluster_alloc_range(si, ci, offset, usage, order);
716                         *foundp = offset;
717                         if (ci->count == SWAPFILE_CLUSTER) {
718                                 offset = SWAP_NEXT_INVALID;
719                                 goto done;
720                         }
721                         offset += nr_pages;
722                         break;
723                 }
724                 offset += nr_pages;
725         }
726         if (offset > end)
727                 offset = SWAP_NEXT_INVALID;
728 done:
729         unlock_cluster(ci);
730         return offset;
731 }
732
733 static void swap_reclaim_full_clusters(struct swap_info_struct *si)
734 {
735         long to_scan = 1;
736         unsigned long offset, end;
737         struct swap_cluster_info *ci;
738         unsigned char *map = si->swap_map;
739         int nr_reclaim, total_reclaimed = 0;
740
741         if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER)
742                 to_scan = si->inuse_pages / SWAPFILE_CLUSTER;
743
744         while (!list_empty(&si->full_clusters)) {
745                 ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list);
746                 list_move_tail(&ci->list, &si->full_clusters);
747                 offset = cluster_offset(si, ci);
748                 end = min(si->max, offset + SWAPFILE_CLUSTER);
749                 to_scan--;
750
751                 while (offset < end) {
752                         if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
753                                 spin_unlock(&si->lock);
754                                 nr_reclaim = __try_to_reclaim_swap(si, offset,
755                                                                    TTRS_ANYWAY | TTRS_DIRECT);
756                                 spin_lock(&si->lock);
757                                 if (nr_reclaim > 0) {
758                                         offset += nr_reclaim;
759                                         total_reclaimed += nr_reclaim;
760                                         continue;
761                                 } else if (nr_reclaim < 0) {
762                                         offset += -nr_reclaim;
763                                         continue;
764                                 }
765                         }
766                         offset++;
767                 }
768                 if (to_scan <= 0 || total_reclaimed)
769                         break;
770         }
771 }
772
773 /*
774  * Try to get swap entries with specified order from current cpu's swap entry
775  * pool (a cluster). This might involve allocating a new cluster for current CPU
776  * too.
777  */
778 static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
779                                               unsigned char usage)
780 {
781         struct percpu_cluster *cluster;
782         struct swap_cluster_info *ci;
783         unsigned int offset, found = 0;
784
785 new_cluster:
786         lockdep_assert_held(&si->lock);
787         cluster = this_cpu_ptr(si->percpu_cluster);
788         offset = cluster->next[order];
789         if (offset) {
790                 offset = alloc_swap_scan_cluster(si, offset, &found, order, usage);
791                 if (found)
792                         goto done;
793         }
794
795         if (!list_empty(&si->free_clusters)) {
796                 ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
797                 offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
798                 VM_BUG_ON(!found);
799                 goto done;
800         }
801
802         if (order < PMD_ORDER) {
803                 unsigned int frags = 0;
804
805                 while (!list_empty(&si->nonfull_clusters[order])) {
806                         ci = list_first_entry(&si->nonfull_clusters[order],
807                                               struct swap_cluster_info, list);
808                         list_move_tail(&ci->list, &si->frag_clusters[order]);
809                         ci->flags = CLUSTER_FLAG_FRAG;
810                         si->frag_cluster_nr[order]++;
811                         offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
812                                                          &found, order, usage);
813                         frags++;
814                         if (found)
815                                 break;
816                 }
817
818                 if (!found) {
819                         /*
820                          * Nonfull clusters are moved to frag tail if we reached
821                          * here, count them too, don't over scan the frag list.
822                          */
823                         while (frags < si->frag_cluster_nr[order]) {
824                                 ci = list_first_entry(&si->frag_clusters[order],
825                                                       struct swap_cluster_info, list);
826                                 /*
827                                  * Rotate the frag list to iterate, they were all failing
828                                  * high order allocation or moved here due to per-CPU usage,
829                                  * this help keeping usable cluster ahead.
830                                  */
831                                 list_move_tail(&ci->list, &si->frag_clusters[order]);
832                                 offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
833                                                                  &found, order, usage);
834                                 frags++;
835                                 if (found)
836                                         break;
837                         }
838                 }
839         }
840
841         if (found)
842                 goto done;
843
844         if (!list_empty(&si->discard_clusters)) {
845                 /*
846                  * we don't have free cluster but have some clusters in
847                  * discarding, do discard now and reclaim them, then
848                  * reread cluster_next_cpu since we dropped si->lock
849                  */
850                 swap_do_scheduled_discard(si);
851                 goto new_cluster;
852         }
853
854         if (order)
855                 goto done;
856
857         /* Order 0 stealing from higher order */
858         for (int o = 1; o < SWAP_NR_ORDERS; o++) {
859                 /*
860                  * Clusters here have at least one usable slots and can't fail order 0
861                  * allocation, but reclaim may drop si->lock and race with another user.
862                  */
863                 while (!list_empty(&si->frag_clusters[o])) {
864                         ci = list_first_entry(&si->frag_clusters[o],
865                                               struct swap_cluster_info, list);
866                         offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
867                                                          &found, 0, usage);
868                         if (found)
869                                 goto done;
870                 }
871
872                 while (!list_empty(&si->nonfull_clusters[o])) {
873                         ci = list_first_entry(&si->nonfull_clusters[o],
874                                               struct swap_cluster_info, list);
875                         offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
876                                                          &found, 0, usage);
877                         if (found)
878                                 goto done;
879                 }
880         }
881
882 done:
883         /* Try reclaim from full clusters if device is nearfull */
884         if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) {
885                 swap_reclaim_full_clusters(si);
886                 if (!found && !order && si->pages != si->inuse_pages)
887                         goto new_cluster;
888         }
889
890         cluster->next[order] = offset;
891         return found;
892 }
893
894 static void __del_from_avail_list(struct swap_info_struct *si)
895 {
896         int nid;
897
898         assert_spin_locked(&si->lock);
899         for_each_node(nid)
900                 plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
901 }
902
903 static void del_from_avail_list(struct swap_info_struct *si)
904 {
905         spin_lock(&swap_avail_lock);
906         __del_from_avail_list(si);
907         spin_unlock(&swap_avail_lock);
908 }
909
910 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
911                              unsigned int nr_entries)
912 {
913         unsigned int end = offset + nr_entries - 1;
914
915         if (offset == si->lowest_bit)
916                 si->lowest_bit += nr_entries;
917         if (end == si->highest_bit)
918                 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
919         WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
920         if (si->inuse_pages == si->pages) {
921                 si->lowest_bit = si->max;
922                 si->highest_bit = 0;
923                 del_from_avail_list(si);
924         }
925 }
926
927 static void add_to_avail_list(struct swap_info_struct *si)
928 {
929         int nid;
930
931         spin_lock(&swap_avail_lock);
932         for_each_node(nid)
933                 plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
934         spin_unlock(&swap_avail_lock);
935 }
936
937 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
938                             unsigned int nr_entries)
939 {
940         unsigned long begin = offset;
941         unsigned long end = offset + nr_entries - 1;
942         void (*swap_slot_free_notify)(struct block_device *, unsigned long);
943         unsigned int i;
944
945         /*
946          * Use atomic clear_bit operations only on zeromap instead of non-atomic
947          * bitmap_clear to prevent adjacent bits corruption due to simultaneous writes.
948          */
949         for (i = 0; i < nr_entries; i++)
950                 clear_bit(offset + i, si->zeromap);
951
952         if (offset < si->lowest_bit)
953                 si->lowest_bit = offset;
954         if (end > si->highest_bit) {
955                 bool was_full = !si->highest_bit;
956
957                 WRITE_ONCE(si->highest_bit, end);
958                 if (was_full && (si->flags & SWP_WRITEOK))
959                         add_to_avail_list(si);
960         }
961         if (si->flags & SWP_BLKDEV)
962                 swap_slot_free_notify =
963                         si->bdev->bd_disk->fops->swap_slot_free_notify;
964         else
965                 swap_slot_free_notify = NULL;
966         while (offset <= end) {
967                 arch_swap_invalidate_page(si->type, offset);
968                 if (swap_slot_free_notify)
969                         swap_slot_free_notify(si->bdev, offset);
970                 offset++;
971         }
972         clear_shadow_from_swap_cache(si->type, begin, end);
973
974         /*
975          * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
976          * only after the above cleanups are done.
977          */
978         smp_wmb();
979         atomic_long_add(nr_entries, &nr_swap_pages);
980         WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
981 }
982
983 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
984 {
985         unsigned long prev;
986
987         if (!(si->flags & SWP_SOLIDSTATE)) {
988                 si->cluster_next = next;
989                 return;
990         }
991
992         prev = this_cpu_read(*si->cluster_next_cpu);
993         /*
994          * Cross the swap address space size aligned trunk, choose
995          * another trunk randomly to avoid lock contention on swap
996          * address space if possible.
997          */
998         if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
999             (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
1000                 /* No free swap slots available */
1001                 if (si->highest_bit <= si->lowest_bit)
1002                         return;
1003                 next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
1004                 next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
1005                 next = max_t(unsigned int, next, si->lowest_bit);
1006         }
1007         this_cpu_write(*si->cluster_next_cpu, next);
1008 }
1009
1010 static bool swap_offset_available_and_locked(struct swap_info_struct *si,
1011                                              unsigned long offset)
1012 {
1013         if (data_race(!si->swap_map[offset])) {
1014                 spin_lock(&si->lock);
1015                 return true;
1016         }
1017
1018         if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
1019                 spin_lock(&si->lock);
1020                 return true;
1021         }
1022
1023         return false;
1024 }
1025
1026 static int cluster_alloc_swap(struct swap_info_struct *si,
1027                              unsigned char usage, int nr,
1028                              swp_entry_t slots[], int order)
1029 {
1030         int n_ret = 0;
1031
1032         VM_BUG_ON(!si->cluster_info);
1033
1034         while (n_ret < nr) {
1035                 unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
1036
1037                 if (!offset)
1038                         break;
1039                 slots[n_ret++] = swp_entry(si->type, offset);
1040         }
1041
1042         return n_ret;
1043 }
1044
1045 static int scan_swap_map_slots(struct swap_info_struct *si,
1046                                unsigned char usage, int nr,
1047                                swp_entry_t slots[], int order)
1048 {
1049         unsigned long offset;
1050         unsigned long scan_base;
1051         unsigned long last_in_cluster = 0;
1052         int latency_ration = LATENCY_LIMIT;
1053         unsigned int nr_pages = 1 << order;
1054         int n_ret = 0;
1055         bool scanned_many = false;
1056
1057         /*
1058          * We try to cluster swap pages by allocating them sequentially
1059          * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
1060          * way, however, we resort to first-free allocation, starting
1061          * a new cluster.  This prevents us from scattering swap pages
1062          * all over the entire swap partition, so that we reduce
1063          * overall disk seek times between swap pages.  -- sct
1064          * But we do now try to find an empty cluster.  -Andrea
1065          * And we let swap pages go all over an SSD partition.  Hugh
1066          */
1067
1068         if (order > 0) {
1069                 /*
1070                  * Should not even be attempting large allocations when huge
1071                  * page swap is disabled.  Warn and fail the allocation.
1072                  */
1073                 if (!IS_ENABLED(CONFIG_THP_SWAP) ||
1074                     nr_pages > SWAPFILE_CLUSTER) {
1075                         VM_WARN_ON_ONCE(1);
1076                         return 0;
1077                 }
1078
1079                 /*
1080                  * Swapfile is not block device or not using clusters so unable
1081                  * to allocate large entries.
1082                  */
1083                 if (!(si->flags & SWP_BLKDEV) || !si->cluster_info)
1084                         return 0;
1085         }
1086
1087         if (si->cluster_info)
1088                 return cluster_alloc_swap(si, usage, nr, slots, order);
1089
1090         si->flags += SWP_SCANNING;
1091
1092         /* For HDD, sequential access is more important. */
1093         scan_base = si->cluster_next;
1094         offset = scan_base;
1095
1096         if (unlikely(!si->cluster_nr--)) {
1097                 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
1098                         si->cluster_nr = SWAPFILE_CLUSTER - 1;
1099                         goto checks;
1100                 }
1101
1102                 spin_unlock(&si->lock);
1103
1104                 /*
1105                  * If seek is expensive, start searching for new cluster from
1106                  * start of partition, to minimize the span of allocated swap.
1107                  */
1108                 scan_base = offset = si->lowest_bit;
1109                 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
1110
1111                 /* Locate the first empty (unaligned) cluster */
1112                 for (; last_in_cluster <= READ_ONCE(si->highest_bit); offset++) {
1113                         if (si->swap_map[offset])
1114                                 last_in_cluster = offset + SWAPFILE_CLUSTER;
1115                         else if (offset == last_in_cluster) {
1116                                 spin_lock(&si->lock);
1117                                 offset -= SWAPFILE_CLUSTER - 1;
1118                                 si->cluster_next = offset;
1119                                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
1120                                 goto checks;
1121                         }
1122                         if (unlikely(--latency_ration < 0)) {
1123                                 cond_resched();
1124                                 latency_ration = LATENCY_LIMIT;
1125                         }
1126                 }
1127
1128                 offset = scan_base;
1129                 spin_lock(&si->lock);
1130                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
1131         }
1132
1133 checks:
1134         if (!(si->flags & SWP_WRITEOK))
1135                 goto no_page;
1136         if (!si->highest_bit)
1137                 goto no_page;
1138         if (offset > si->highest_bit)
1139                 scan_base = offset = si->lowest_bit;
1140
1141         /* reuse swap entry of cache-only swap if not busy. */
1142         if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
1143                 int swap_was_freed;
1144                 spin_unlock(&si->lock);
1145                 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
1146                 spin_lock(&si->lock);
1147                 /* entry was freed successfully, try to use this again */
1148                 if (swap_was_freed > 0)
1149                         goto checks;
1150                 goto scan; /* check next one */
1151         }
1152
1153         if (si->swap_map[offset]) {
1154                 if (!n_ret)
1155                         goto scan;
1156                 else
1157                         goto done;
1158         }
1159         memset(si->swap_map + offset, usage, nr_pages);
1160
1161         swap_range_alloc(si, offset, nr_pages);
1162         slots[n_ret++] = swp_entry(si->type, offset);
1163
1164         /* got enough slots or reach max slots? */
1165         if ((n_ret == nr) || (offset >= si->highest_bit))
1166                 goto done;
1167
1168         /* search for next available slot */
1169
1170         /* time to take a break? */
1171         if (unlikely(--latency_ration < 0)) {
1172                 if (n_ret)
1173                         goto done;
1174                 spin_unlock(&si->lock);
1175                 cond_resched();
1176                 spin_lock(&si->lock);
1177                 latency_ration = LATENCY_LIMIT;
1178         }
1179
1180         if (si->cluster_nr && !si->swap_map[++offset]) {
1181                 /* non-ssd case, still more slots in cluster? */
1182                 --si->cluster_nr;
1183                 goto checks;
1184         }
1185
1186         /*
1187          * Even if there's no free clusters available (fragmented),
1188          * try to scan a little more quickly with lock held unless we
1189          * have scanned too many slots already.
1190          */
1191         if (!scanned_many) {
1192                 unsigned long scan_limit;
1193
1194                 if (offset < scan_base)
1195                         scan_limit = scan_base;
1196                 else
1197                         scan_limit = si->highest_bit;
1198                 for (; offset <= scan_limit && --latency_ration > 0;
1199                      offset++) {
1200                         if (!si->swap_map[offset])
1201                                 goto checks;
1202                 }
1203         }
1204
1205 done:
1206         if (order == 0)
1207                 set_cluster_next(si, offset + 1);
1208         si->flags -= SWP_SCANNING;
1209         return n_ret;
1210
1211 scan:
1212         VM_WARN_ON(order > 0);
1213         spin_unlock(&si->lock);
1214         while (++offset <= READ_ONCE(si->highest_bit)) {
1215                 if (unlikely(--latency_ration < 0)) {
1216                         cond_resched();
1217                         latency_ration = LATENCY_LIMIT;
1218                         scanned_many = true;
1219                 }
1220                 if (swap_offset_available_and_locked(si, offset))
1221                         goto checks;
1222         }
1223         offset = si->lowest_bit;
1224         while (offset < scan_base) {
1225                 if (unlikely(--latency_ration < 0)) {
1226                         cond_resched();
1227                         latency_ration = LATENCY_LIMIT;
1228                         scanned_many = true;
1229                 }
1230                 if (swap_offset_available_and_locked(si, offset))
1231                         goto checks;
1232                 offset++;
1233         }
1234         spin_lock(&si->lock);
1235
1236 no_page:
1237         si->flags -= SWP_SCANNING;
1238         return n_ret;
1239 }
1240
1241 int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
1242 {
1243         int order = swap_entry_order(entry_order);
1244         unsigned long size = 1 << order;
1245         struct swap_info_struct *si, *next;
1246         long avail_pgs;
1247         int n_ret = 0;
1248         int node;
1249
1250         spin_lock(&swap_avail_lock);
1251
1252         avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1253         if (avail_pgs <= 0) {
1254                 spin_unlock(&swap_avail_lock);
1255                 goto noswap;
1256         }
1257
1258         n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1259
1260         atomic_long_sub(n_goal * size, &nr_swap_pages);
1261
1262 start_over:
1263         node = numa_node_id();
1264         plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1265                 /* requeue si to after same-priority siblings */
1266                 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1267                 spin_unlock(&swap_avail_lock);
1268                 spin_lock(&si->lock);
1269                 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1270                         spin_lock(&swap_avail_lock);
1271                         if (plist_node_empty(&si->avail_lists[node])) {
1272                                 spin_unlock(&si->lock);
1273                                 goto nextsi;
1274                         }
1275                         WARN(!si->highest_bit,
1276                              "swap_info %d in list but !highest_bit\n",
1277                              si->type);
1278                         WARN(!(si->flags & SWP_WRITEOK),
1279                              "swap_info %d in list but !SWP_WRITEOK\n",
1280                              si->type);
1281                         __del_from_avail_list(si);
1282                         spin_unlock(&si->lock);
1283                         goto nextsi;
1284                 }
1285                 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1286                                             n_goal, swp_entries, order);
1287                 spin_unlock(&si->lock);
1288                 if (n_ret || size > 1)
1289                         goto check_out;
1290                 cond_resched();
1291
1292                 spin_lock(&swap_avail_lock);
1293 nextsi:
1294                 /*
1295                  * if we got here, it's likely that si was almost full before,
1296                  * and since scan_swap_map_slots() can drop the si->lock,
1297                  * multiple callers probably all tried to get a page from the
1298                  * same si and it filled up before we could get one; or, the si
1299                  * filled up between us dropping swap_avail_lock and taking
1300                  * si->lock. Since we dropped the swap_avail_lock, the
1301                  * swap_avail_head list may have been modified; so if next is
1302                  * still in the swap_avail_head list then try it, otherwise
1303                  * start over if we have not gotten any slots.
1304                  */
1305                 if (plist_node_empty(&next->avail_lists[node]))
1306                         goto start_over;
1307         }
1308
1309         spin_unlock(&swap_avail_lock);
1310
1311 check_out:
1312         if (n_ret < n_goal)
1313                 atomic_long_add((long)(n_goal - n_ret) * size,
1314                                 &nr_swap_pages);
1315 noswap:
1316         return n_ret;
1317 }
1318
1319 static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1320 {
1321         struct swap_info_struct *si;
1322         unsigned long offset;
1323
1324         if (!entry.val)
1325                 goto out;
1326         si = swp_swap_info(entry);
1327         if (!si)
1328                 goto bad_nofile;
1329         if (data_race(!(si->flags & SWP_USED)))
1330                 goto bad_device;
1331         offset = swp_offset(entry);
1332         if (offset >= si->max)
1333                 goto bad_offset;
1334         if (data_race(!si->swap_map[swp_offset(entry)]))
1335                 goto bad_free;
1336         return si;
1337
1338 bad_free:
1339         pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1340         goto out;
1341 bad_offset:
1342         pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1343         goto out;
1344 bad_device:
1345         pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1346         goto out;
1347 bad_nofile:
1348         pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1349 out:
1350         return NULL;
1351 }
1352
1353 static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1354                                         struct swap_info_struct *q)
1355 {
1356         struct swap_info_struct *p;
1357
1358         p = _swap_info_get(entry);
1359
1360         if (p != q) {
1361                 if (q != NULL)
1362                         spin_unlock(&q->lock);
1363                 if (p != NULL)
1364                         spin_lock(&p->lock);
1365         }
1366         return p;
1367 }
1368
1369 static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
1370                                               unsigned long offset,
1371                                               unsigned char usage)
1372 {
1373         unsigned char count;
1374         unsigned char has_cache;
1375
1376         count = si->swap_map[offset];
1377
1378         has_cache = count & SWAP_HAS_CACHE;
1379         count &= ~SWAP_HAS_CACHE;
1380
1381         if (usage == SWAP_HAS_CACHE) {
1382                 VM_BUG_ON(!has_cache);
1383                 has_cache = 0;
1384         } else if (count == SWAP_MAP_SHMEM) {
1385                 /*
1386                  * Or we could insist on shmem.c using a special
1387                  * swap_shmem_free() and free_shmem_swap_and_cache()...
1388                  */
1389                 count = 0;
1390         } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1391                 if (count == COUNT_CONTINUED) {
1392                         if (swap_count_continued(si, offset, count))
1393                                 count = SWAP_MAP_MAX | COUNT_CONTINUED;
1394                         else
1395                                 count = SWAP_MAP_MAX;
1396                 } else
1397                         count--;
1398         }
1399
1400         usage = count | has_cache;
1401         if (usage)
1402                 WRITE_ONCE(si->swap_map[offset], usage);
1403         else
1404                 WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
1405
1406         return usage;
1407 }
1408
1409 /*
1410  * When we get a swap entry, if there aren't some other ways to
1411  * prevent swapoff, such as the folio in swap cache is locked, RCU
1412  * reader side is locked, etc., the swap entry may become invalid
1413  * because of swapoff.  Then, we need to enclose all swap related
1414  * functions with get_swap_device() and put_swap_device(), unless the
1415  * swap functions call get/put_swap_device() by themselves.
1416  *
1417  * RCU reader side lock (including any spinlock) is sufficient to
1418  * prevent swapoff, because synchronize_rcu() is called in swapoff()
1419  * before freeing data structures.
1420  *
1421  * Check whether swap entry is valid in the swap device.  If so,
1422  * return pointer to swap_info_struct, and keep the swap entry valid
1423  * via preventing the swap device from being swapoff, until
1424  * put_swap_device() is called.  Otherwise return NULL.
1425  *
1426  * Notice that swapoff or swapoff+swapon can still happen before the
1427  * percpu_ref_tryget_live() in get_swap_device() or after the
1428  * percpu_ref_put() in put_swap_device() if there isn't any other way
1429  * to prevent swapoff.  The caller must be prepared for that.  For
1430  * example, the following situation is possible.
1431  *
1432  *   CPU1                               CPU2
1433  *   do_swap_page()
1434  *     ...                              swapoff+swapon
1435  *     __read_swap_cache_async()
1436  *       swapcache_prepare()
1437  *         __swap_duplicate()
1438  *           // check swap_map
1439  *     // verify PTE not changed
1440  *
1441  * In __swap_duplicate(), the swap_map need to be checked before
1442  * changing partly because the specified swap entry may be for another
1443  * swap device which has been swapoff.  And in do_swap_page(), after
1444  * the page is read from the swap device, the PTE is verified not
1445  * changed with the page table locked to check whether the swap device
1446  * has been swapoff or swapoff+swapon.
1447  */
1448 struct swap_info_struct *get_swap_device(swp_entry_t entry)
1449 {
1450         struct swap_info_struct *si;
1451         unsigned long offset;
1452
1453         if (!entry.val)
1454                 goto out;
1455         si = swp_swap_info(entry);
1456         if (!si)
1457                 goto bad_nofile;
1458         if (!percpu_ref_tryget_live(&si->users))
1459                 goto out;
1460         /*
1461          * Guarantee the si->users are checked before accessing other
1462          * fields of swap_info_struct.
1463          *
1464          * Paired with the spin_unlock() after setup_swap_info() in
1465          * enable_swap_info().
1466          */
1467         smp_rmb();
1468         offset = swp_offset(entry);
1469         if (offset >= si->max)
1470                 goto put_out;
1471
1472         return si;
1473 bad_nofile:
1474         pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1475 out:
1476         return NULL;
1477 put_out:
1478         pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1479         percpu_ref_put(&si->users);
1480         return NULL;
1481 }
1482
1483 static unsigned char __swap_entry_free(struct swap_info_struct *si,
1484                                        swp_entry_t entry)
1485 {
1486         struct swap_cluster_info *ci;
1487         unsigned long offset = swp_offset(entry);
1488         unsigned char usage;
1489
1490         ci = lock_cluster_or_swap_info(si, offset);
1491         usage = __swap_entry_free_locked(si, offset, 1);
1492         unlock_cluster_or_swap_info(si, ci);
1493         if (!usage)
1494                 free_swap_slot(entry);
1495
1496         return usage;
1497 }
1498
1499 static bool __swap_entries_free(struct swap_info_struct *si,
1500                 swp_entry_t entry, int nr)
1501 {
1502         unsigned long offset = swp_offset(entry);
1503         unsigned int type = swp_type(entry);
1504         struct swap_cluster_info *ci;
1505         bool has_cache = false;
1506         unsigned char count;
1507         int i;
1508
1509         if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
1510                 goto fallback;
1511         /* cross into another cluster */
1512         if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
1513                 goto fallback;
1514
1515         ci = lock_cluster_or_swap_info(si, offset);
1516         if (!swap_is_last_map(si, offset, nr, &has_cache)) {
1517                 unlock_cluster_or_swap_info(si, ci);
1518                 goto fallback;
1519         }
1520         for (i = 0; i < nr; i++)
1521                 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
1522         unlock_cluster_or_swap_info(si, ci);
1523
1524         if (!has_cache) {
1525                 for (i = 0; i < nr; i++)
1526                         zswap_invalidate(swp_entry(si->type, offset + i));
1527                 spin_lock(&si->lock);
1528                 swap_entry_range_free(si, entry, nr);
1529                 spin_unlock(&si->lock);
1530         }
1531         return has_cache;
1532
1533 fallback:
1534         for (i = 0; i < nr; i++) {
1535                 if (data_race(si->swap_map[offset + i])) {
1536                         count = __swap_entry_free(si, swp_entry(type, offset + i));
1537                         if (count == SWAP_HAS_CACHE)
1538                                 has_cache = true;
1539                 } else {
1540                         WARN_ON_ONCE(1);
1541                 }
1542         }
1543         return has_cache;
1544 }
1545
1546 /*
1547  * Drop the last HAS_CACHE flag of swap entries, caller have to
1548  * ensure all entries belong to the same cgroup.
1549  */
1550 static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
1551                                   unsigned int nr_pages)
1552 {
1553         unsigned long offset = swp_offset(entry);
1554         unsigned char *map = si->swap_map + offset;
1555         unsigned char *map_end = map + nr_pages;
1556         struct swap_cluster_info *ci;
1557
1558         ci = lock_cluster(si, offset);
1559         do {
1560                 VM_BUG_ON(*map != SWAP_HAS_CACHE);
1561                 *map = 0;
1562         } while (++map < map_end);
1563         dec_cluster_info_page(si, ci, nr_pages);
1564         unlock_cluster(ci);
1565
1566         mem_cgroup_uncharge_swap(entry, nr_pages);
1567         swap_range_free(si, offset, nr_pages);
1568 }
1569
1570 static void cluster_swap_free_nr(struct swap_info_struct *si,
1571                 unsigned long offset, int nr_pages,
1572                 unsigned char usage)
1573 {
1574         struct swap_cluster_info *ci;
1575         DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 };
1576         int i, nr;
1577
1578         ci = lock_cluster_or_swap_info(si, offset);
1579         while (nr_pages) {
1580                 nr = min(BITS_PER_LONG, nr_pages);
1581                 for (i = 0; i < nr; i++) {
1582                         if (!__swap_entry_free_locked(si, offset + i, usage))
1583                                 bitmap_set(to_free, i, 1);
1584                 }
1585                 if (!bitmap_empty(to_free, BITS_PER_LONG)) {
1586                         unlock_cluster_or_swap_info(si, ci);
1587                         for_each_set_bit(i, to_free, BITS_PER_LONG)
1588                                 free_swap_slot(swp_entry(si->type, offset + i));
1589                         if (nr == nr_pages)
1590                                 return;
1591                         bitmap_clear(to_free, 0, BITS_PER_LONG);
1592                         ci = lock_cluster_or_swap_info(si, offset);
1593                 }
1594                 offset += nr;
1595                 nr_pages -= nr;
1596         }
1597         unlock_cluster_or_swap_info(si, ci);
1598 }
1599
1600 /*
1601  * Caller has made sure that the swap device corresponding to entry
1602  * is still around or has not been recycled.
1603  */
1604 void swap_free_nr(swp_entry_t entry, int nr_pages)
1605 {
1606         int nr;
1607         struct swap_info_struct *sis;
1608         unsigned long offset = swp_offset(entry);
1609
1610         sis = _swap_info_get(entry);
1611         if (!sis)
1612                 return;
1613
1614         while (nr_pages) {
1615                 nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
1616                 cluster_swap_free_nr(sis, offset, nr, 1);
1617                 offset += nr;
1618                 nr_pages -= nr;
1619         }
1620 }
1621
1622 /*
1623  * Called after dropping swapcache to decrease refcnt to swap entries.
1624  */
1625 void put_swap_folio(struct folio *folio, swp_entry_t entry)
1626 {
1627         unsigned long offset = swp_offset(entry);
1628         struct swap_cluster_info *ci;
1629         struct swap_info_struct *si;
1630         int size = 1 << swap_entry_order(folio_order(folio));
1631
1632         si = _swap_info_get(entry);
1633         if (!si)
1634                 return;
1635
1636         ci = lock_cluster_or_swap_info(si, offset);
1637         if (size > 1 && swap_is_has_cache(si, offset, size)) {
1638                 unlock_cluster_or_swap_info(si, ci);
1639                 spin_lock(&si->lock);
1640                 swap_entry_range_free(si, entry, size);
1641                 spin_unlock(&si->lock);
1642                 return;
1643         }
1644         for (int i = 0; i < size; i++, entry.val++) {
1645                 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1646                         unlock_cluster_or_swap_info(si, ci);
1647                         free_swap_slot(entry);
1648                         if (i == size - 1)
1649                                 return;
1650                         lock_cluster_or_swap_info(si, offset);
1651                 }
1652         }
1653         unlock_cluster_or_swap_info(si, ci);
1654 }
1655
1656 static int swp_entry_cmp(const void *ent1, const void *ent2)
1657 {
1658         const swp_entry_t *e1 = ent1, *e2 = ent2;
1659
1660         return (int)swp_type(*e1) - (int)swp_type(*e2);
1661 }
1662
1663 void swapcache_free_entries(swp_entry_t *entries, int n)
1664 {
1665         struct swap_info_struct *p, *prev;
1666         int i;
1667
1668         if (n <= 0)
1669                 return;
1670
1671         prev = NULL;
1672         p = NULL;
1673
1674         /*
1675          * Sort swap entries by swap device, so each lock is only taken once.
1676          * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1677          * so low that it isn't necessary to optimize further.
1678          */
1679         if (nr_swapfiles > 1)
1680                 sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1681         for (i = 0; i < n; ++i) {
1682                 p = swap_info_get_cont(entries[i], prev);
1683                 if (p)
1684                         swap_entry_range_free(p, entries[i], 1);
1685                 prev = p;
1686         }
1687         if (p)
1688                 spin_unlock(&p->lock);
1689 }
1690
1691 int __swap_count(swp_entry_t entry)
1692 {
1693         struct swap_info_struct *si = swp_swap_info(entry);
1694         pgoff_t offset = swp_offset(entry);
1695
1696         return swap_count(si->swap_map[offset]);
1697 }
1698
1699 /*
1700  * How many references to @entry are currently swapped out?
1701  * This does not give an exact answer when swap count is continued,
1702  * but does include the high COUNT_CONTINUED flag to allow for that.
1703  */
1704 int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1705 {
1706         pgoff_t offset = swp_offset(entry);
1707         struct swap_cluster_info *ci;
1708         int count;
1709
1710         ci = lock_cluster_or_swap_info(si, offset);
1711         count = swap_count(si->swap_map[offset]);
1712         unlock_cluster_or_swap_info(si, ci);
1713         return count;
1714 }
1715
1716 /*
1717  * How many references to @entry are currently swapped out?
1718  * This considers COUNT_CONTINUED so it returns exact answer.
1719  */
1720 int swp_swapcount(swp_entry_t entry)
1721 {
1722         int count, tmp_count, n;
1723         struct swap_info_struct *si;
1724         struct swap_cluster_info *ci;
1725         struct page *page;
1726         pgoff_t offset;
1727         unsigned char *map;
1728
1729         si = _swap_info_get(entry);
1730         if (!si)
1731                 return 0;
1732
1733         offset = swp_offset(entry);
1734
1735         ci = lock_cluster_or_swap_info(si, offset);
1736
1737         count = swap_count(si->swap_map[offset]);
1738         if (!(count & COUNT_CONTINUED))
1739                 goto out;
1740
1741         count &= ~COUNT_CONTINUED;
1742         n = SWAP_MAP_MAX + 1;
1743
1744         page = vmalloc_to_page(si->swap_map + offset);
1745         offset &= ~PAGE_MASK;
1746         VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1747
1748         do {
1749                 page = list_next_entry(page, lru);
1750                 map = kmap_local_page(page);
1751                 tmp_count = map[offset];
1752                 kunmap_local(map);
1753
1754                 count += (tmp_count & ~COUNT_CONTINUED) * n;
1755                 n *= (SWAP_CONT_MAX + 1);
1756         } while (tmp_count & COUNT_CONTINUED);
1757 out:
1758         unlock_cluster_or_swap_info(si, ci);
1759         return count;
1760 }
1761
1762 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1763                                          swp_entry_t entry, int order)
1764 {
1765         struct swap_cluster_info *ci;
1766         unsigned char *map = si->swap_map;
1767         unsigned int nr_pages = 1 << order;
1768         unsigned long roffset = swp_offset(entry);
1769         unsigned long offset = round_down(roffset, nr_pages);
1770         int i;
1771         bool ret = false;
1772
1773         ci = lock_cluster_or_swap_info(si, offset);
1774         if (!ci || nr_pages == 1) {
1775                 if (swap_count(map[roffset]))
1776                         ret = true;
1777                 goto unlock_out;
1778         }
1779         for (i = 0; i < nr_pages; i++) {
1780                 if (swap_count(map[offset + i])) {
1781                         ret = true;
1782                         break;
1783                 }
1784         }
1785 unlock_out:
1786         unlock_cluster_or_swap_info(si, ci);
1787         return ret;
1788 }
1789
1790 static bool folio_swapped(struct folio *folio)
1791 {
1792         swp_entry_t entry = folio->swap;
1793         struct swap_info_struct *si = _swap_info_get(entry);
1794
1795         if (!si)
1796                 return false;
1797
1798         if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1799                 return swap_swapcount(si, entry) != 0;
1800
1801         return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
1802 }
1803
1804 static bool folio_swapcache_freeable(struct folio *folio)
1805 {
1806         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1807
1808         if (!folio_test_swapcache(folio))
1809                 return false;
1810         if (folio_test_writeback(folio))
1811                 return false;
1812
1813         /*
1814          * Once hibernation has begun to create its image of memory,
1815          * there's a danger that one of the calls to folio_free_swap()
1816          * - most probably a call from __try_to_reclaim_swap() while
1817          * hibernation is allocating its own swap pages for the image,
1818          * but conceivably even a call from memory reclaim - will free
1819          * the swap from a folio which has already been recorded in the
1820          * image as a clean swapcache folio, and then reuse its swap for
1821          * another page of the image.  On waking from hibernation, the
1822          * original folio might be freed under memory pressure, then
1823          * later read back in from swap, now with the wrong data.
1824          *
1825          * Hibernation suspends storage while it is writing the image
1826          * to disk so check that here.
1827          */
1828         if (pm_suspended_storage())
1829                 return false;
1830
1831         return true;
1832 }
1833
1834 /**
1835  * folio_free_swap() - Free the swap space used for this folio.
1836  * @folio: The folio to remove.
1837  *
1838  * If swap is getting full, or if there are no more mappings of this folio,
1839  * then call folio_free_swap to free its swap space.
1840  *
1841  * Return: true if we were able to release the swap space.
1842  */
1843 bool folio_free_swap(struct folio *folio)
1844 {
1845         if (!folio_swapcache_freeable(folio))
1846                 return false;
1847         if (folio_swapped(folio))
1848                 return false;
1849
1850         delete_from_swap_cache(folio);
1851         folio_set_dirty(folio);
1852         return true;
1853 }
1854
1855 /**
1856  * free_swap_and_cache_nr() - Release reference on range of swap entries and
1857  *                            reclaim their cache if no more references remain.
1858  * @entry: First entry of range.
1859  * @nr: Number of entries in range.
1860  *
1861  * For each swap entry in the contiguous range, release a reference. If any swap
1862  * entries become free, try to reclaim their underlying folios, if present. The
1863  * offset range is defined by [entry.offset, entry.offset + nr).
1864  */
1865 void free_swap_and_cache_nr(swp_entry_t entry, int nr)
1866 {
1867         const unsigned long start_offset = swp_offset(entry);
1868         const unsigned long end_offset = start_offset + nr;
1869         struct swap_info_struct *si;
1870         bool any_only_cache = false;
1871         unsigned long offset;
1872
1873         if (non_swap_entry(entry))
1874                 return;
1875
1876         si = get_swap_device(entry);
1877         if (!si)
1878                 return;
1879
1880         if (WARN_ON(end_offset > si->max))
1881                 goto out;
1882
1883         /*
1884          * First free all entries in the range.
1885          */
1886         any_only_cache = __swap_entries_free(si, entry, nr);
1887
1888         /*
1889          * Short-circuit the below loop if none of the entries had their
1890          * reference drop to zero.
1891          */
1892         if (!any_only_cache)
1893                 goto out;
1894
1895         /*
1896          * Now go back over the range trying to reclaim the swap cache. This is
1897          * more efficient for large folios because we will only try to reclaim
1898          * the swap once per folio in the common case. If we do
1899          * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the
1900          * latter will get a reference and lock the folio for every individual
1901          * page but will only succeed once the swap slot for every subpage is
1902          * zero.
1903          */
1904         for (offset = start_offset; offset < end_offset; offset += nr) {
1905                 nr = 1;
1906                 if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
1907                         /*
1908                          * Folios are always naturally aligned in swap so
1909                          * advance forward to the next boundary. Zero means no
1910                          * folio was found for the swap entry, so advance by 1
1911                          * in this case. Negative value means folio was found
1912                          * but could not be reclaimed. Here we can still advance
1913                          * to the next boundary.
1914                          */
1915                         nr = __try_to_reclaim_swap(si, offset,
1916                                                    TTRS_UNMAPPED | TTRS_FULL);
1917                         if (nr == 0)
1918                                 nr = 1;
1919                         else if (nr < 0)
1920                                 nr = -nr;
1921                         nr = ALIGN(offset + 1, nr) - offset;
1922                 }
1923         }
1924
1925 out:
1926         put_swap_device(si);
1927 }
1928
1929 #ifdef CONFIG_HIBERNATION
1930
1931 swp_entry_t get_swap_page_of_type(int type)
1932 {
1933         struct swap_info_struct *si = swap_type_to_swap_info(type);
1934         swp_entry_t entry = {0};
1935
1936         if (!si)
1937                 goto fail;
1938
1939         /* This is called for allocating swap entry, not cache */
1940         spin_lock(&si->lock);
1941         if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0))
1942                 atomic_long_dec(&nr_swap_pages);
1943         spin_unlock(&si->lock);
1944 fail:
1945         return entry;
1946 }
1947
1948 /*
1949  * Find the swap type that corresponds to given device (if any).
1950  *
1951  * @offset - number of the PAGE_SIZE-sized block of the device, starting
1952  * from 0, in which the swap header is expected to be located.
1953  *
1954  * This is needed for the suspend to disk (aka swsusp).
1955  */
1956 int swap_type_of(dev_t device, sector_t offset)
1957 {
1958         int type;
1959
1960         if (!device)
1961                 return -1;
1962
1963         spin_lock(&swap_lock);
1964         for (type = 0; type < nr_swapfiles; type++) {
1965                 struct swap_info_struct *sis = swap_info[type];
1966
1967                 if (!(sis->flags & SWP_WRITEOK))
1968                         continue;
1969
1970                 if (device == sis->bdev->bd_dev) {
1971                         struct swap_extent *se = first_se(sis);
1972
1973                         if (se->start_block == offset) {
1974                                 spin_unlock(&swap_lock);
1975                                 return type;
1976                         }
1977                 }
1978         }
1979         spin_unlock(&swap_lock);
1980         return -ENODEV;
1981 }
1982
1983 int find_first_swap(dev_t *device)
1984 {
1985         int type;
1986
1987         spin_lock(&swap_lock);
1988         for (type = 0; type < nr_swapfiles; type++) {
1989                 struct swap_info_struct *sis = swap_info[type];
1990
1991                 if (!(sis->flags & SWP_WRITEOK))
1992                         continue;
1993                 *device = sis->bdev->bd_dev;
1994                 spin_unlock(&swap_lock);
1995                 return type;
1996         }
1997         spin_unlock(&swap_lock);
1998         return -ENODEV;
1999 }
2000
2001 /*
2002  * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
2003  * corresponding to given index in swap_info (swap type).
2004  */
2005 sector_t swapdev_block(int type, pgoff_t offset)
2006 {
2007         struct swap_info_struct *si = swap_type_to_swap_info(type);
2008         struct swap_extent *se;
2009
2010         if (!si || !(si->flags & SWP_WRITEOK))
2011                 return 0;
2012         se = offset_to_swap_extent(si, offset);
2013         return se->start_block + (offset - se->start_page);
2014 }
2015
2016 /*
2017  * Return either the total number of swap pages of given type, or the number
2018  * of free pages of that type (depending on @free)
2019  *
2020  * This is needed for software suspend
2021  */
2022 unsigned int count_swap_pages(int type, int free)
2023 {
2024         unsigned int n = 0;
2025
2026         spin_lock(&swap_lock);
2027         if ((unsigned int)type < nr_swapfiles) {
2028                 struct swap_info_struct *sis = swap_info[type];
2029
2030                 spin_lock(&sis->lock);
2031                 if (sis->flags & SWP_WRITEOK) {
2032                         n = sis->pages;
2033                         if (free)
2034                                 n -= sis->inuse_pages;
2035                 }
2036                 spin_unlock(&sis->lock);
2037         }
2038         spin_unlock(&swap_lock);
2039         return n;
2040 }
2041 #endif /* CONFIG_HIBERNATION */
2042
2043 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
2044 {
2045         return pte_same(pte_swp_clear_flags(pte), swp_pte);
2046 }
2047
2048 /*
2049  * No need to decide whether this PTE shares the swap entry with others,
2050  * just let do_wp_page work it out if a write is requested later - to
2051  * force COW, vm_page_prot omits write permission from any private vma.
2052  */
2053 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
2054                 unsigned long addr, swp_entry_t entry, struct folio *folio)
2055 {
2056         struct page *page;
2057         struct folio *swapcache;
2058         spinlock_t *ptl;
2059         pte_t *pte, new_pte, old_pte;
2060         bool hwpoisoned = false;
2061         int ret = 1;
2062
2063         swapcache = folio;
2064         folio = ksm_might_need_to_copy(folio, vma, addr);
2065         if (unlikely(!folio))
2066                 return -ENOMEM;
2067         else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
2068                 hwpoisoned = true;
2069                 folio = swapcache;
2070         }
2071
2072         page = folio_file_page(folio, swp_offset(entry));
2073         if (PageHWPoison(page))
2074                 hwpoisoned = true;
2075
2076         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
2077         if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
2078                                                 swp_entry_to_pte(entry)))) {
2079                 ret = 0;
2080                 goto out;
2081         }
2082
2083         old_pte = ptep_get(pte);
2084
2085         if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
2086                 swp_entry_t swp_entry;
2087
2088                 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2089                 if (hwpoisoned) {
2090                         swp_entry = make_hwpoison_entry(page);
2091                 } else {
2092                         swp_entry = make_poisoned_swp_entry();
2093                 }
2094                 new_pte = swp_entry_to_pte(swp_entry);
2095                 ret = 0;
2096                 goto setpte;
2097         }
2098
2099         /*
2100          * Some architectures may have to restore extra metadata to the page
2101          * when reading from swap. This metadata may be indexed by swap entry
2102          * so this must be called before swap_free().
2103          */
2104         arch_swap_restore(folio_swap(entry, folio), folio);
2105
2106         dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2107         inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
2108         folio_get(folio);
2109         if (folio == swapcache) {
2110                 rmap_t rmap_flags = RMAP_NONE;
2111
2112                 /*
2113                  * See do_swap_page(): writeback would be problematic.
2114                  * However, we do a folio_wait_writeback() just before this
2115                  * call and have the folio locked.
2116                  */
2117                 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2118                 if (pte_swp_exclusive(old_pte))
2119                         rmap_flags |= RMAP_EXCLUSIVE;
2120                 /*
2121                  * We currently only expect small !anon folios, which are either
2122                  * fully exclusive or fully shared. If we ever get large folios
2123                  * here, we have to be careful.
2124                  */
2125                 if (!folio_test_anon(folio)) {
2126                         VM_WARN_ON_ONCE(folio_test_large(folio));
2127                         VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2128                         folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
2129                 } else {
2130                         folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
2131                 }
2132         } else { /* ksm created a completely new copy */
2133                 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
2134                 folio_add_lru_vma(folio, vma);
2135         }
2136         new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
2137         if (pte_swp_soft_dirty(old_pte))
2138                 new_pte = pte_mksoft_dirty(new_pte);
2139         if (pte_swp_uffd_wp(old_pte))
2140                 new_pte = pte_mkuffd_wp(new_pte);
2141 setpte:
2142         set_pte_at(vma->vm_mm, addr, pte, new_pte);
2143         swap_free(entry);
2144 out:
2145         if (pte)
2146                 pte_unmap_unlock(pte, ptl);
2147         if (folio != swapcache) {
2148                 folio_unlock(folio);
2149                 folio_put(folio);
2150         }
2151         return ret;
2152 }
2153
2154 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
2155                         unsigned long addr, unsigned long end,
2156                         unsigned int type)
2157 {
2158         pte_t *pte = NULL;
2159         struct swap_info_struct *si;
2160
2161         si = swap_info[type];
2162         do {
2163                 struct folio *folio;
2164                 unsigned long offset;
2165                 unsigned char swp_count;
2166                 swp_entry_t entry;
2167                 int ret;
2168                 pte_t ptent;
2169
2170                 if (!pte++) {
2171                         pte = pte_offset_map(pmd, addr);
2172                         if (!pte)
2173                                 break;
2174                 }
2175
2176                 ptent = ptep_get_lockless(pte);
2177
2178                 if (!is_swap_pte(ptent))
2179                         continue;
2180
2181                 entry = pte_to_swp_entry(ptent);
2182                 if (swp_type(entry) != type)
2183                         continue;
2184
2185                 offset = swp_offset(entry);
2186                 pte_unmap(pte);
2187                 pte = NULL;
2188
2189                 folio = swap_cache_get_folio(entry, vma, addr);
2190                 if (!folio) {
2191                         struct vm_fault vmf = {
2192                                 .vma = vma,
2193                                 .address = addr,
2194                                 .real_address = addr,
2195                                 .pmd = pmd,
2196                         };
2197
2198                         folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
2199                                                 &vmf);
2200                 }
2201                 if (!folio) {
2202                         swp_count = READ_ONCE(si->swap_map[offset]);
2203                         if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
2204                                 continue;
2205                         return -ENOMEM;
2206                 }
2207
2208                 folio_lock(folio);
2209                 folio_wait_writeback(folio);
2210                 ret = unuse_pte(vma, pmd, addr, entry, folio);
2211                 if (ret < 0) {
2212                         folio_unlock(folio);
2213                         folio_put(folio);
2214                         return ret;
2215                 }
2216
2217                 folio_free_swap(folio);
2218                 folio_unlock(folio);
2219                 folio_put(folio);
2220         } while (addr += PAGE_SIZE, addr != end);
2221
2222         if (pte)
2223                 pte_unmap(pte);
2224         return 0;
2225 }
2226
2227 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2228                                 unsigned long addr, unsigned long end,
2229                                 unsigned int type)
2230 {
2231         pmd_t *pmd;
2232         unsigned long next;
2233         int ret;
2234
2235         pmd = pmd_offset(pud, addr);
2236         do {
2237                 cond_resched();
2238                 next = pmd_addr_end(addr, end);
2239                 ret = unuse_pte_range(vma, pmd, addr, next, type);
2240                 if (ret)
2241                         return ret;
2242         } while (pmd++, addr = next, addr != end);
2243         return 0;
2244 }
2245
2246 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2247                                 unsigned long addr, unsigned long end,
2248                                 unsigned int type)
2249 {
2250         pud_t *pud;
2251         unsigned long next;
2252         int ret;
2253
2254         pud = pud_offset(p4d, addr);
2255         do {
2256                 next = pud_addr_end(addr, end);
2257                 if (pud_none_or_clear_bad(pud))
2258                         continue;
2259                 ret = unuse_pmd_range(vma, pud, addr, next, type);
2260                 if (ret)
2261                         return ret;
2262         } while (pud++, addr = next, addr != end);
2263         return 0;
2264 }
2265
2266 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2267                                 unsigned long addr, unsigned long end,
2268                                 unsigned int type)
2269 {
2270         p4d_t *p4d;
2271         unsigned long next;
2272         int ret;
2273
2274         p4d = p4d_offset(pgd, addr);
2275         do {
2276                 next = p4d_addr_end(addr, end);
2277                 if (p4d_none_or_clear_bad(p4d))
2278                         continue;
2279                 ret = unuse_pud_range(vma, p4d, addr, next, type);
2280                 if (ret)
2281                         return ret;
2282         } while (p4d++, addr = next, addr != end);
2283         return 0;
2284 }
2285
2286 static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
2287 {
2288         pgd_t *pgd;
2289         unsigned long addr, end, next;
2290         int ret;
2291
2292         addr = vma->vm_start;
2293         end = vma->vm_end;
2294
2295         pgd = pgd_offset(vma->vm_mm, addr);
2296         do {
2297                 next = pgd_addr_end(addr, end);
2298                 if (pgd_none_or_clear_bad(pgd))
2299                         continue;
2300                 ret = unuse_p4d_range(vma, pgd, addr, next, type);
2301                 if (ret)
2302                         return ret;
2303         } while (pgd++, addr = next, addr != end);
2304         return 0;
2305 }
2306
2307 static int unuse_mm(struct mm_struct *mm, unsigned int type)
2308 {
2309         struct vm_area_struct *vma;
2310         int ret = 0;
2311         VMA_ITERATOR(vmi, mm, 0);
2312
2313         mmap_read_lock(mm);
2314         for_each_vma(vmi, vma) {
2315                 if (vma->anon_vma) {
2316                         ret = unuse_vma(vma, type);
2317                         if (ret)
2318                                 break;
2319                 }
2320
2321                 cond_resched();
2322         }
2323         mmap_read_unlock(mm);
2324         return ret;
2325 }
2326
2327 /*
2328  * Scan swap_map from current position to next entry still in use.
2329  * Return 0 if there are no inuse entries after prev till end of
2330  * the map.
2331  */
2332 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2333                                         unsigned int prev)
2334 {
2335         unsigned int i;
2336         unsigned char count;
2337
2338         /*
2339          * No need for swap_lock here: we're just looking
2340          * for whether an entry is in use, not modifying it; false
2341          * hits are okay, and sys_swapoff() has already prevented new
2342          * allocations from this area (while holding swap_lock).
2343          */
2344         for (i = prev + 1; i < si->max; i++) {
2345                 count = READ_ONCE(si->swap_map[i]);
2346                 if (count && swap_count(count) != SWAP_MAP_BAD)
2347                         break;
2348                 if ((i % LATENCY_LIMIT) == 0)
2349                         cond_resched();
2350         }
2351
2352         if (i == si->max)
2353                 i = 0;
2354
2355         return i;
2356 }
2357
2358 static int try_to_unuse(unsigned int type)
2359 {
2360         struct mm_struct *prev_mm;
2361         struct mm_struct *mm;
2362         struct list_head *p;
2363         int retval = 0;
2364         struct swap_info_struct *si = swap_info[type];
2365         struct folio *folio;
2366         swp_entry_t entry;
2367         unsigned int i;
2368
2369         if (!READ_ONCE(si->inuse_pages))
2370                 goto success;
2371
2372 retry:
2373         retval = shmem_unuse(type);
2374         if (retval)
2375                 return retval;
2376
2377         prev_mm = &init_mm;
2378         mmget(prev_mm);
2379
2380         spin_lock(&mmlist_lock);
2381         p = &init_mm.mmlist;
2382         while (READ_ONCE(si->inuse_pages) &&
2383                !signal_pending(current) &&
2384                (p = p->next) != &init_mm.mmlist) {
2385
2386                 mm = list_entry(p, struct mm_struct, mmlist);
2387                 if (!mmget_not_zero(mm))
2388                         continue;
2389                 spin_unlock(&mmlist_lock);
2390                 mmput(prev_mm);
2391                 prev_mm = mm;
2392                 retval = unuse_mm(mm, type);
2393                 if (retval) {
2394                         mmput(prev_mm);
2395                         return retval;
2396                 }
2397
2398                 /*
2399                  * Make sure that we aren't completely killing
2400                  * interactive performance.
2401                  */
2402                 cond_resched();
2403                 spin_lock(&mmlist_lock);
2404         }
2405         spin_unlock(&mmlist_lock);
2406
2407         mmput(prev_mm);
2408
2409         i = 0;
2410         while (READ_ONCE(si->inuse_pages) &&
2411                !signal_pending(current) &&
2412                (i = find_next_to_unuse(si, i)) != 0) {
2413
2414                 entry = swp_entry(type, i);
2415                 folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
2416                 if (IS_ERR(folio))
2417                         continue;
2418
2419                 /*
2420                  * It is conceivable that a racing task removed this folio from
2421                  * swap cache just before we acquired the page lock. The folio
2422                  * might even be back in swap cache on another swap area. But
2423                  * that is okay, folio_free_swap() only removes stale folios.
2424                  */
2425                 folio_lock(folio);
2426                 folio_wait_writeback(folio);
2427                 folio_free_swap(folio);
2428                 folio_unlock(folio);
2429                 folio_put(folio);
2430         }
2431
2432         /*
2433          * Lets check again to see if there are still swap entries in the map.
2434          * If yes, we would need to do retry the unuse logic again.
2435          * Under global memory pressure, swap entries can be reinserted back
2436          * into process space after the mmlist loop above passes over them.
2437          *
2438          * Limit the number of retries? No: when mmget_not_zero()
2439          * above fails, that mm is likely to be freeing swap from
2440          * exit_mmap(), which proceeds at its own independent pace;
2441          * and even shmem_writepage() could have been preempted after
2442          * folio_alloc_swap(), temporarily hiding that swap.  It's easy
2443          * and robust (though cpu-intensive) just to keep retrying.
2444          */
2445         if (READ_ONCE(si->inuse_pages)) {
2446                 if (!signal_pending(current))
2447                         goto retry;
2448                 return -EINTR;
2449         }
2450
2451 success:
2452         /*
2453          * Make sure that further cleanups after try_to_unuse() returns happen
2454          * after swap_range_free() reduces si->inuse_pages to 0.
2455          */
2456         smp_mb();
2457         return 0;
2458 }
2459
2460 /*
2461  * After a successful try_to_unuse, if no swap is now in use, we know
2462  * we can empty the mmlist.  swap_lock must be held on entry and exit.
2463  * Note that mmlist_lock nests inside swap_lock, and an mm must be
2464  * added to the mmlist just after page_duplicate - before would be racy.
2465  */
2466 static void drain_mmlist(void)
2467 {
2468         struct list_head *p, *next;
2469         unsigned int type;
2470
2471         for (type = 0; type < nr_swapfiles; type++)
2472                 if (swap_info[type]->inuse_pages)
2473                         return;
2474         spin_lock(&mmlist_lock);
2475         list_for_each_safe(p, next, &init_mm.mmlist)
2476                 list_del_init(p);
2477         spin_unlock(&mmlist_lock);
2478 }
2479
2480 /*
2481  * Free all of a swapdev's extent information
2482  */
2483 static void destroy_swap_extents(struct swap_info_struct *sis)
2484 {
2485         while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2486                 struct rb_node *rb = sis->swap_extent_root.rb_node;
2487                 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2488
2489                 rb_erase(rb, &sis->swap_extent_root);
2490                 kfree(se);
2491         }
2492
2493         if (sis->flags & SWP_ACTIVATED) {
2494                 struct file *swap_file = sis->swap_file;
2495                 struct address_space *mapping = swap_file->f_mapping;
2496
2497                 sis->flags &= ~SWP_ACTIVATED;
2498                 if (mapping->a_ops->swap_deactivate)
2499                         mapping->a_ops->swap_deactivate(swap_file);
2500         }
2501 }
2502
2503 /*
2504  * Add a block range (and the corresponding page range) into this swapdev's
2505  * extent tree.
2506  *
2507  * This function rather assumes that it is called in ascending page order.
2508  */
2509 int
2510 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2511                 unsigned long nr_pages, sector_t start_block)
2512 {
2513         struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2514         struct swap_extent *se;
2515         struct swap_extent *new_se;
2516
2517         /*
2518          * place the new node at the right most since the
2519          * function is called in ascending page order.
2520          */
2521         while (*link) {
2522                 parent = *link;
2523                 link = &parent->rb_right;
2524         }
2525
2526         if (parent) {
2527                 se = rb_entry(parent, struct swap_extent, rb_node);
2528                 BUG_ON(se->start_page + se->nr_pages != start_page);
2529                 if (se->start_block + se->nr_pages == start_block) {
2530                         /* Merge it */
2531                         se->nr_pages += nr_pages;
2532                         return 0;
2533                 }
2534         }
2535
2536         /* No merge, insert a new extent. */
2537         new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2538         if (new_se == NULL)
2539                 return -ENOMEM;
2540         new_se->start_page = start_page;
2541         new_se->nr_pages = nr_pages;
2542         new_se->start_block = start_block;
2543
2544         rb_link_node(&new_se->rb_node, parent, link);
2545         rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2546         return 1;
2547 }
2548 EXPORT_SYMBOL_GPL(add_swap_extent);
2549
2550 /*
2551  * A `swap extent' is a simple thing which maps a contiguous range of pages
2552  * onto a contiguous range of disk blocks.  A rbtree of swap extents is
2553  * built at swapon time and is then used at swap_writepage/swap_read_folio
2554  * time for locating where on disk a page belongs.
2555  *
2556  * If the swapfile is an S_ISBLK block device, a single extent is installed.
2557  * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2558  * swap files identically.
2559  *
2560  * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2561  * extent rbtree operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2562  * swapfiles are handled *identically* after swapon time.
2563  *
2564  * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2565  * and will parse them into a rbtree, in PAGE_SIZE chunks.  If some stray
2566  * blocks are found which do not fall within the PAGE_SIZE alignment
2567  * requirements, they are simply tossed out - we will never use those blocks
2568  * for swapping.
2569  *
2570  * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2571  * prevents users from writing to the swap device, which will corrupt memory.
2572  *
2573  * The amount of disk space which a single swap extent represents varies.
2574  * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2575  * extents in the rbtree. - akpm.
2576  */
2577 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2578 {
2579         struct file *swap_file = sis->swap_file;
2580         struct address_space *mapping = swap_file->f_mapping;
2581         struct inode *inode = mapping->host;
2582         int ret;
2583
2584         if (S_ISBLK(inode->i_mode)) {
2585                 ret = add_swap_extent(sis, 0, sis->max, 0);
2586                 *span = sis->pages;
2587                 return ret;
2588         }
2589
2590         if (mapping->a_ops->swap_activate) {
2591                 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2592                 if (ret < 0)
2593                         return ret;
2594                 sis->flags |= SWP_ACTIVATED;
2595                 if ((sis->flags & SWP_FS_OPS) &&
2596                     sio_pool_init() != 0) {
2597                         destroy_swap_extents(sis);
2598                         return -ENOMEM;
2599                 }
2600                 return ret;
2601         }
2602
2603         return generic_swapfile_activate(sis, swap_file, span);
2604 }
2605
2606 static int swap_node(struct swap_info_struct *si)
2607 {
2608         struct block_device *bdev;
2609
2610         if (si->bdev)
2611                 bdev = si->bdev;
2612         else
2613                 bdev = si->swap_file->f_inode->i_sb->s_bdev;
2614
2615         return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2616 }
2617
2618 static void setup_swap_info(struct swap_info_struct *si, int prio,
2619                             unsigned char *swap_map,
2620                             struct swap_cluster_info *cluster_info,
2621                             unsigned long *zeromap)
2622 {
2623         int i;
2624
2625         if (prio >= 0)
2626                 si->prio = prio;
2627         else
2628                 si->prio = --least_priority;
2629         /*
2630          * the plist prio is negated because plist ordering is
2631          * low-to-high, while swap ordering is high-to-low
2632          */
2633         si->list.prio = -si->prio;
2634         for_each_node(i) {
2635                 if (si->prio >= 0)
2636                         si->avail_lists[i].prio = -si->prio;
2637                 else {
2638                         if (swap_node(si) == i)
2639                                 si->avail_lists[i].prio = 1;
2640                         else
2641                                 si->avail_lists[i].prio = -si->prio;
2642                 }
2643         }
2644         si->swap_map = swap_map;
2645         si->cluster_info = cluster_info;
2646         si->zeromap = zeromap;
2647 }
2648
2649 static void _enable_swap_info(struct swap_info_struct *si)
2650 {
2651         si->flags |= SWP_WRITEOK;
2652         atomic_long_add(si->pages, &nr_swap_pages);
2653         total_swap_pages += si->pages;
2654
2655         assert_spin_locked(&swap_lock);
2656         /*
2657          * both lists are plists, and thus priority ordered.
2658          * swap_active_head needs to be priority ordered for swapoff(),
2659          * which on removal of any swap_info_struct with an auto-assigned
2660          * (i.e. negative) priority increments the auto-assigned priority
2661          * of any lower-priority swap_info_structs.
2662          * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2663          * which allocates swap pages from the highest available priority
2664          * swap_info_struct.
2665          */
2666         plist_add(&si->list, &swap_active_head);
2667
2668         /* add to available list iff swap device is not full */
2669         if (si->highest_bit)
2670                 add_to_avail_list(si);
2671 }
2672
2673 static void enable_swap_info(struct swap_info_struct *si, int prio,
2674                                 unsigned char *swap_map,
2675                                 struct swap_cluster_info *cluster_info,
2676                                 unsigned long *zeromap)
2677 {
2678         spin_lock(&swap_lock);
2679         spin_lock(&si->lock);
2680         setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
2681         spin_unlock(&si->lock);
2682         spin_unlock(&swap_lock);
2683         /*
2684          * Finished initializing swap device, now it's safe to reference it.
2685          */
2686         percpu_ref_resurrect(&si->users);
2687         spin_lock(&swap_lock);
2688         spin_lock(&si->lock);
2689         _enable_swap_info(si);
2690         spin_unlock(&si->lock);
2691         spin_unlock(&swap_lock);
2692 }
2693
2694 static void reinsert_swap_info(struct swap_info_struct *si)
2695 {
2696         spin_lock(&swap_lock);
2697         spin_lock(&si->lock);
2698         setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
2699         _enable_swap_info(si);
2700         spin_unlock(&si->lock);
2701         spin_unlock(&swap_lock);
2702 }
2703
2704 static bool __has_usable_swap(void)
2705 {
2706         return !plist_head_empty(&swap_active_head);
2707 }
2708
2709 bool has_usable_swap(void)
2710 {
2711         bool ret;
2712
2713         spin_lock(&swap_lock);
2714         ret = __has_usable_swap();
2715         spin_unlock(&swap_lock);
2716         return ret;
2717 }
2718
2719 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2720 {
2721         struct swap_info_struct *p = NULL;
2722         unsigned char *swap_map;
2723         unsigned long *zeromap;
2724         struct swap_cluster_info *cluster_info;
2725         struct file *swap_file, *victim;
2726         struct address_space *mapping;
2727         struct inode *inode;
2728         struct filename *pathname;
2729         int err, found = 0;
2730
2731         if (!capable(CAP_SYS_ADMIN))
2732                 return -EPERM;
2733
2734         BUG_ON(!current->mm);
2735
2736         pathname = getname(specialfile);
2737         if (IS_ERR(pathname))
2738                 return PTR_ERR(pathname);
2739
2740         victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2741         err = PTR_ERR(victim);
2742         if (IS_ERR(victim))
2743                 goto out;
2744
2745         mapping = victim->f_mapping;
2746         spin_lock(&swap_lock);
2747         plist_for_each_entry(p, &swap_active_head, list) {
2748                 if (p->flags & SWP_WRITEOK) {
2749                         if (p->swap_file->f_mapping == mapping) {
2750                                 found = 1;
2751                                 break;
2752                         }
2753                 }
2754         }
2755         if (!found) {
2756                 err = -EINVAL;
2757                 spin_unlock(&swap_lock);
2758                 goto out_dput;
2759         }
2760         if (!security_vm_enough_memory_mm(current->mm, p->pages))
2761                 vm_unacct_memory(p->pages);
2762         else {
2763                 err = -ENOMEM;
2764                 spin_unlock(&swap_lock);
2765                 goto out_dput;
2766         }
2767         spin_lock(&p->lock);
2768         del_from_avail_list(p);
2769         if (p->prio < 0) {
2770                 struct swap_info_struct *si = p;
2771                 int nid;
2772
2773                 plist_for_each_entry_continue(si, &swap_active_head, list) {
2774                         si->prio++;
2775                         si->list.prio--;
2776                         for_each_node(nid) {
2777                                 if (si->avail_lists[nid].prio != 1)
2778                                         si->avail_lists[nid].prio--;
2779                         }
2780                 }
2781                 least_priority++;
2782         }
2783         plist_del(&p->list, &swap_active_head);
2784         atomic_long_sub(p->pages, &nr_swap_pages);
2785         total_swap_pages -= p->pages;
2786         p->flags &= ~SWP_WRITEOK;
2787         spin_unlock(&p->lock);
2788         spin_unlock(&swap_lock);
2789
2790         disable_swap_slots_cache_lock();
2791
2792         set_current_oom_origin();
2793         err = try_to_unuse(p->type);
2794         clear_current_oom_origin();
2795
2796         if (err) {
2797                 /* re-insert swap space back into swap_list */
2798                 reinsert_swap_info(p);
2799                 reenable_swap_slots_cache_unlock();
2800                 goto out_dput;
2801         }
2802
2803         reenable_swap_slots_cache_unlock();
2804
2805         /*
2806          * Wait for swap operations protected by get/put_swap_device()
2807          * to complete.  Because of synchronize_rcu() here, all swap
2808          * operations protected by RCU reader side lock (including any
2809          * spinlock) will be waited too.  This makes it easy to
2810          * prevent folio_test_swapcache() and the following swap cache
2811          * operations from racing with swapoff.
2812          */
2813         percpu_ref_kill(&p->users);
2814         synchronize_rcu();
2815         wait_for_completion(&p->comp);
2816
2817         flush_work(&p->discard_work);
2818
2819         destroy_swap_extents(p);
2820         if (p->flags & SWP_CONTINUED)
2821                 free_swap_count_continuations(p);
2822
2823         if (!p->bdev || !bdev_nonrot(p->bdev))
2824                 atomic_dec(&nr_rotate_swap);
2825
2826         mutex_lock(&swapon_mutex);
2827         spin_lock(&swap_lock);
2828         spin_lock(&p->lock);
2829         drain_mmlist();
2830
2831         /* wait for anyone still in scan_swap_map_slots */
2832         p->highest_bit = 0;             /* cuts scans short */
2833         while (p->flags >= SWP_SCANNING) {
2834                 spin_unlock(&p->lock);
2835                 spin_unlock(&swap_lock);
2836                 schedule_timeout_uninterruptible(1);
2837                 spin_lock(&swap_lock);
2838                 spin_lock(&p->lock);
2839         }
2840
2841         swap_file = p->swap_file;
2842         p->swap_file = NULL;
2843         p->max = 0;
2844         swap_map = p->swap_map;
2845         p->swap_map = NULL;
2846         zeromap = p->zeromap;
2847         p->zeromap = NULL;
2848         cluster_info = p->cluster_info;
2849         p->cluster_info = NULL;
2850         spin_unlock(&p->lock);
2851         spin_unlock(&swap_lock);
2852         arch_swap_invalidate_area(p->type);
2853         zswap_swapoff(p->type);
2854         mutex_unlock(&swapon_mutex);
2855         free_percpu(p->percpu_cluster);
2856         p->percpu_cluster = NULL;
2857         free_percpu(p->cluster_next_cpu);
2858         p->cluster_next_cpu = NULL;
2859         vfree(swap_map);
2860         kvfree(zeromap);
2861         kvfree(cluster_info);
2862         /* Destroy swap account information */
2863         swap_cgroup_swapoff(p->type);
2864         exit_swap_address_space(p->type);
2865
2866         inode = mapping->host;
2867
2868         inode_lock(inode);
2869         inode->i_flags &= ~S_SWAPFILE;
2870         inode_unlock(inode);
2871         filp_close(swap_file, NULL);
2872
2873         /*
2874          * Clear the SWP_USED flag after all resources are freed so that swapon
2875          * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2876          * not hold p->lock after we cleared its SWP_WRITEOK.
2877          */
2878         spin_lock(&swap_lock);
2879         p->flags = 0;
2880         spin_unlock(&swap_lock);
2881
2882         err = 0;
2883         atomic_inc(&proc_poll_event);
2884         wake_up_interruptible(&proc_poll_wait);
2885
2886 out_dput:
2887         filp_close(victim, NULL);
2888 out:
2889         putname(pathname);
2890         return err;
2891 }
2892
2893 #ifdef CONFIG_PROC_FS
2894 static __poll_t swaps_poll(struct file *file, poll_table *wait)
2895 {
2896         struct seq_file *seq = file->private_data;
2897
2898         poll_wait(file, &proc_poll_wait, wait);
2899
2900         if (seq->poll_event != atomic_read(&proc_poll_event)) {
2901                 seq->poll_event = atomic_read(&proc_poll_event);
2902                 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2903         }
2904
2905         return EPOLLIN | EPOLLRDNORM;
2906 }
2907
2908 /* iterator */
2909 static void *swap_start(struct seq_file *swap, loff_t *pos)
2910 {
2911         struct swap_info_struct *si;
2912         int type;
2913         loff_t l = *pos;
2914
2915         mutex_lock(&swapon_mutex);
2916
2917         if (!l)
2918                 return SEQ_START_TOKEN;
2919
2920         for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2921                 if (!(si->flags & SWP_USED) || !si->swap_map)
2922                         continue;
2923                 if (!--l)
2924                         return si;
2925         }
2926
2927         return NULL;
2928 }
2929
2930 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2931 {
2932         struct swap_info_struct *si = v;
2933         int type;
2934
2935         if (v == SEQ_START_TOKEN)
2936                 type = 0;
2937         else
2938                 type = si->type + 1;
2939
2940         ++(*pos);
2941         for (; (si = swap_type_to_swap_info(type)); type++) {
2942                 if (!(si->flags & SWP_USED) || !si->swap_map)
2943                         continue;
2944                 return si;
2945         }
2946
2947         return NULL;
2948 }
2949
2950 static void swap_stop(struct seq_file *swap, void *v)
2951 {
2952         mutex_unlock(&swapon_mutex);
2953 }
2954
2955 static int swap_show(struct seq_file *swap, void *v)
2956 {
2957         struct swap_info_struct *si = v;
2958         struct file *file;
2959         int len;
2960         unsigned long bytes, inuse;
2961
2962         if (si == SEQ_START_TOKEN) {
2963                 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2964                 return 0;
2965         }
2966
2967         bytes = K(si->pages);
2968         inuse = K(READ_ONCE(si->inuse_pages));
2969
2970         file = si->swap_file;
2971         len = seq_file_path(swap, file, " \t\n\\");
2972         seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2973                         len < 40 ? 40 - len : 1, " ",
2974                         S_ISBLK(file_inode(file)->i_mode) ?
2975                                 "partition" : "file\t",
2976                         bytes, bytes < 10000000 ? "\t" : "",
2977                         inuse, inuse < 10000000 ? "\t" : "",
2978                         si->prio);
2979         return 0;
2980 }
2981
2982 static const struct seq_operations swaps_op = {
2983         .start =        swap_start,
2984         .next =         swap_next,
2985         .stop =         swap_stop,
2986         .show =         swap_show
2987 };
2988
2989 static int swaps_open(struct inode *inode, struct file *file)
2990 {
2991         struct seq_file *seq;
2992         int ret;
2993
2994         ret = seq_open(file, &swaps_op);
2995         if (ret)
2996                 return ret;
2997
2998         seq = file->private_data;
2999         seq->poll_event = atomic_read(&proc_poll_event);
3000         return 0;
3001 }
3002
3003 static const struct proc_ops swaps_proc_ops = {
3004         .proc_flags     = PROC_ENTRY_PERMANENT,
3005         .proc_open      = swaps_open,
3006         .proc_read      = seq_read,
3007         .proc_lseek     = seq_lseek,
3008         .proc_release   = seq_release,
3009         .proc_poll      = swaps_poll,
3010 };
3011
3012 static int __init procswaps_init(void)
3013 {
3014         proc_create("swaps", 0, NULL, &swaps_proc_ops);
3015         return 0;
3016 }
3017 __initcall(procswaps_init);
3018 #endif /* CONFIG_PROC_FS */
3019
3020 #ifdef MAX_SWAPFILES_CHECK
3021 static int __init max_swapfiles_check(void)
3022 {
3023         MAX_SWAPFILES_CHECK();
3024         return 0;
3025 }
3026 late_initcall(max_swapfiles_check);
3027 #endif
3028
3029 static struct swap_info_struct *alloc_swap_info(void)
3030 {
3031         struct swap_info_struct *p;
3032         struct swap_info_struct *defer = NULL;
3033         unsigned int type;
3034         int i;
3035
3036         p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
3037         if (!p)
3038                 return ERR_PTR(-ENOMEM);
3039
3040         if (percpu_ref_init(&p->users, swap_users_ref_free,
3041                             PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
3042                 kvfree(p);
3043                 return ERR_PTR(-ENOMEM);
3044         }
3045
3046         spin_lock(&swap_lock);
3047         for (type = 0; type < nr_swapfiles; type++) {
3048                 if (!(swap_info[type]->flags & SWP_USED))
3049                         break;
3050         }
3051         if (type >= MAX_SWAPFILES) {
3052                 spin_unlock(&swap_lock);
3053                 percpu_ref_exit(&p->users);
3054                 kvfree(p);
3055                 return ERR_PTR(-EPERM);
3056         }
3057         if (type >= nr_swapfiles) {
3058                 p->type = type;
3059                 /*
3060                  * Publish the swap_info_struct after initializing it.
3061                  * Note that kvzalloc() above zeroes all its fields.
3062                  */
3063                 smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
3064                 nr_swapfiles++;
3065         } else {
3066                 defer = p;
3067                 p = swap_info[type];
3068                 /*
3069                  * Do not memset this entry: a racing procfs swap_next()
3070                  * would be relying on p->type to remain valid.
3071                  */
3072         }
3073         p->swap_extent_root = RB_ROOT;
3074         plist_node_init(&p->list, 0);
3075         for_each_node(i)
3076                 plist_node_init(&p->avail_lists[i], 0);
3077         p->flags = SWP_USED;
3078         spin_unlock(&swap_lock);
3079         if (defer) {
3080                 percpu_ref_exit(&defer->users);
3081                 kvfree(defer);
3082         }
3083         spin_lock_init(&p->lock);
3084         spin_lock_init(&p->cont_lock);
3085         init_completion(&p->comp);
3086
3087         return p;
3088 }
3089
3090 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
3091 {
3092         if (S_ISBLK(inode->i_mode)) {
3093                 si->bdev = I_BDEV(inode);
3094                 /*
3095                  * Zoned block devices contain zones that have a sequential
3096                  * write only restriction.  Hence zoned block devices are not
3097                  * suitable for swapping.  Disallow them here.
3098                  */
3099                 if (bdev_is_zoned(si->bdev))
3100                         return -EINVAL;
3101                 si->flags |= SWP_BLKDEV;
3102         } else if (S_ISREG(inode->i_mode)) {
3103                 si->bdev = inode->i_sb->s_bdev;
3104         }
3105
3106         return 0;
3107 }
3108
3109
3110 /*
3111  * Find out how many pages are allowed for a single swap device. There
3112  * are two limiting factors:
3113  * 1) the number of bits for the swap offset in the swp_entry_t type, and
3114  * 2) the number of bits in the swap pte, as defined by the different
3115  * architectures.
3116  *
3117  * In order to find the largest possible bit mask, a swap entry with
3118  * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
3119  * decoded to a swp_entry_t again, and finally the swap offset is
3120  * extracted.
3121  *
3122  * This will mask all the bits from the initial ~0UL mask that can't
3123  * be encoded in either the swp_entry_t or the architecture definition
3124  * of a swap pte.
3125  */
3126 unsigned long generic_max_swapfile_size(void)
3127 {
3128         return swp_offset(pte_to_swp_entry(
3129                         swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
3130 }
3131
3132 /* Can be overridden by an architecture for additional checks. */
3133 __weak unsigned long arch_max_swapfile_size(void)
3134 {
3135         return generic_max_swapfile_size();
3136 }
3137
3138 static unsigned long read_swap_header(struct swap_info_struct *si,
3139                                         union swap_header *swap_header,
3140                                         struct inode *inode)
3141 {
3142         int i;
3143         unsigned long maxpages;
3144         unsigned long swapfilepages;
3145         unsigned long last_page;
3146
3147         if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
3148                 pr_err("Unable to find swap-space signature\n");
3149                 return 0;
3150         }
3151
3152         /* swap partition endianness hack... */
3153         if (swab32(swap_header->info.version) == 1) {
3154                 swab32s(&swap_header->info.version);
3155                 swab32s(&swap_header->info.last_page);
3156                 swab32s(&swap_header->info.nr_badpages);
3157                 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3158                         return 0;
3159                 for (i = 0; i < swap_header->info.nr_badpages; i++)
3160                         swab32s(&swap_header->info.badpages[i]);
3161         }
3162         /* Check the swap header's sub-version */
3163         if (swap_header->info.version != 1) {
3164                 pr_warn("Unable to handle swap header version %d\n",
3165                         swap_header->info.version);
3166                 return 0;
3167         }
3168
3169         si->lowest_bit  = 1;
3170         si->cluster_next = 1;
3171         si->cluster_nr = 0;
3172
3173         maxpages = swapfile_maximum_size;
3174         last_page = swap_header->info.last_page;
3175         if (!last_page) {
3176                 pr_warn("Empty swap-file\n");
3177                 return 0;
3178         }
3179         if (last_page > maxpages) {
3180                 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
3181                         K(maxpages), K(last_page));
3182         }
3183         if (maxpages > last_page) {
3184                 maxpages = last_page + 1;
3185                 /* p->max is an unsigned int: don't overflow it */
3186                 if ((unsigned int)maxpages == 0)
3187                         maxpages = UINT_MAX;
3188         }
3189         si->highest_bit = maxpages - 1;
3190
3191         if (!maxpages)
3192                 return 0;
3193         swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3194         if (swapfilepages && maxpages > swapfilepages) {
3195                 pr_warn("Swap area shorter than signature indicates\n");
3196                 return 0;
3197         }
3198         if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3199                 return 0;
3200         if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3201                 return 0;
3202
3203         return maxpages;
3204 }
3205
3206 #define SWAP_CLUSTER_INFO_COLS                                          \
3207         DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3208 #define SWAP_CLUSTER_SPACE_COLS                                         \
3209         DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3210 #define SWAP_CLUSTER_COLS                                               \
3211         max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3212
3213 static int setup_swap_map_and_extents(struct swap_info_struct *si,
3214                                         union swap_header *swap_header,
3215                                         unsigned char *swap_map,
3216                                         unsigned long maxpages,
3217                                         sector_t *span)
3218 {
3219         unsigned int nr_good_pages;
3220         unsigned long i;
3221         int nr_extents;
3222
3223         nr_good_pages = maxpages - 1;   /* omit header page */
3224
3225         for (i = 0; i < swap_header->info.nr_badpages; i++) {
3226                 unsigned int page_nr = swap_header->info.badpages[i];
3227                 if (page_nr == 0 || page_nr > swap_header->info.last_page)
3228                         return -EINVAL;
3229                 if (page_nr < maxpages) {
3230                         swap_map[page_nr] = SWAP_MAP_BAD;
3231                         nr_good_pages--;
3232                 }
3233         }
3234
3235         if (nr_good_pages) {
3236                 swap_map[0] = SWAP_MAP_BAD;
3237                 si->max = maxpages;
3238                 si->pages = nr_good_pages;
3239                 nr_extents = setup_swap_extents(si, span);
3240                 if (nr_extents < 0)
3241                         return nr_extents;
3242                 nr_good_pages = si->pages;
3243         }
3244         if (!nr_good_pages) {
3245                 pr_warn("Empty swap-file\n");
3246                 return -EINVAL;
3247         }
3248
3249         return nr_extents;
3250 }
3251
3252 static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
3253                                                 union swap_header *swap_header,
3254                                                 unsigned long maxpages)
3255 {
3256         unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3257         unsigned long col = si->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3258         struct swap_cluster_info *cluster_info;
3259         unsigned long i, j, k, idx;
3260         int cpu, err = -ENOMEM;
3261
3262         cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
3263         if (!cluster_info)
3264                 goto err;
3265
3266         for (i = 0; i < nr_clusters; i++)
3267                 spin_lock_init(&cluster_info[i].lock);
3268
3269         si->cluster_next_cpu = alloc_percpu(unsigned int);
3270         if (!si->cluster_next_cpu)
3271                 goto err_free;
3272
3273         /* Random start position to help with wear leveling */
3274         for_each_possible_cpu(cpu)
3275                 per_cpu(*si->cluster_next_cpu, cpu) =
3276                 get_random_u32_inclusive(1, si->highest_bit);
3277
3278         si->percpu_cluster = alloc_percpu(struct percpu_cluster);
3279         if (!si->percpu_cluster)
3280                 goto err_free;
3281
3282         for_each_possible_cpu(cpu) {
3283                 struct percpu_cluster *cluster;
3284
3285                 cluster = per_cpu_ptr(si->percpu_cluster, cpu);
3286                 for (i = 0; i < SWAP_NR_ORDERS; i++)
3287                         cluster->next[i] = SWAP_NEXT_INVALID;
3288         }
3289
3290         /*
3291          * Mark unusable pages as unavailable. The clusters aren't
3292          * marked free yet, so no list operations are involved yet.
3293          *
3294          * See setup_swap_map_and_extents(): header page, bad pages,
3295          * and the EOF part of the last cluster.
3296          */
3297         inc_cluster_info_page(si, cluster_info, 0);
3298         for (i = 0; i < swap_header->info.nr_badpages; i++)
3299                 inc_cluster_info_page(si, cluster_info,
3300                                       swap_header->info.badpages[i]);
3301         for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3302                 inc_cluster_info_page(si, cluster_info, i);
3303
3304         INIT_LIST_HEAD(&si->free_clusters);
3305         INIT_LIST_HEAD(&si->full_clusters);
3306         INIT_LIST_HEAD(&si->discard_clusters);
3307
3308         for (i = 0; i < SWAP_NR_ORDERS; i++) {
3309                 INIT_LIST_HEAD(&si->nonfull_clusters[i]);
3310                 INIT_LIST_HEAD(&si->frag_clusters[i]);
3311                 si->frag_cluster_nr[i] = 0;
3312         }
3313
3314         /*
3315          * Reduce false cache line sharing between cluster_info and
3316          * sharing same address space.
3317          */
3318         for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3319                 j = (k + col) % SWAP_CLUSTER_COLS;
3320                 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3321                         struct swap_cluster_info *ci;
3322                         idx = i * SWAP_CLUSTER_COLS + j;
3323                         ci = cluster_info + idx;
3324                         if (idx >= nr_clusters)
3325                                 continue;
3326                         if (ci->count) {
3327                                 ci->flags = CLUSTER_FLAG_NONFULL;
3328                                 list_add_tail(&ci->list, &si->nonfull_clusters[0]);
3329                                 continue;
3330                         }
3331                         ci->flags = CLUSTER_FLAG_FREE;
3332                         list_add_tail(&ci->list, &si->free_clusters);
3333                 }
3334         }
3335
3336         return cluster_info;
3337
3338 err_free:
3339         kvfree(cluster_info);
3340 err:
3341         return ERR_PTR(err);
3342 }
3343
3344 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3345 {
3346         struct swap_info_struct *si;
3347         struct filename *name;
3348         struct file *swap_file = NULL;
3349         struct address_space *mapping;
3350         struct dentry *dentry;
3351         int prio;
3352         int error;
3353         union swap_header *swap_header;
3354         int nr_extents;
3355         sector_t span;
3356         unsigned long maxpages;
3357         unsigned char *swap_map = NULL;
3358         unsigned long *zeromap = NULL;
3359         struct swap_cluster_info *cluster_info = NULL;
3360         struct folio *folio = NULL;
3361         struct inode *inode = NULL;
3362         bool inced_nr_rotate_swap = false;
3363
3364         if (swap_flags & ~SWAP_FLAGS_VALID)
3365                 return -EINVAL;
3366
3367         if (!capable(CAP_SYS_ADMIN))
3368                 return -EPERM;
3369
3370         if (!swap_avail_heads)
3371                 return -ENOMEM;
3372
3373         si = alloc_swap_info();
3374         if (IS_ERR(si))
3375                 return PTR_ERR(si);
3376
3377         INIT_WORK(&si->discard_work, swap_discard_work);
3378
3379         name = getname(specialfile);
3380         if (IS_ERR(name)) {
3381                 error = PTR_ERR(name);
3382                 name = NULL;
3383                 goto bad_swap;
3384         }
3385         swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
3386         if (IS_ERR(swap_file)) {
3387                 error = PTR_ERR(swap_file);
3388                 swap_file = NULL;
3389                 goto bad_swap;
3390         }
3391
3392         si->swap_file = swap_file;
3393         mapping = swap_file->f_mapping;
3394         dentry = swap_file->f_path.dentry;
3395         inode = mapping->host;
3396
3397         error = claim_swapfile(si, inode);
3398         if (unlikely(error))
3399                 goto bad_swap;
3400
3401         inode_lock(inode);
3402         if (d_unlinked(dentry) || cant_mount(dentry)) {
3403                 error = -ENOENT;
3404                 goto bad_swap_unlock_inode;
3405         }
3406         if (IS_SWAPFILE(inode)) {
3407                 error = -EBUSY;
3408                 goto bad_swap_unlock_inode;
3409         }
3410
3411         /*
3412          * Read the swap header.
3413          */
3414         if (!mapping->a_ops->read_folio) {
3415                 error = -EINVAL;
3416                 goto bad_swap_unlock_inode;
3417         }
3418         folio = read_mapping_folio(mapping, 0, swap_file);
3419         if (IS_ERR(folio)) {
3420                 error = PTR_ERR(folio);
3421                 goto bad_swap_unlock_inode;
3422         }
3423         swap_header = kmap_local_folio(folio, 0);
3424
3425         maxpages = read_swap_header(si, swap_header, inode);
3426         if (unlikely(!maxpages)) {
3427                 error = -EINVAL;
3428                 goto bad_swap_unlock_inode;
3429         }
3430
3431         /* OK, set up the swap map and apply the bad block list */
3432         swap_map = vzalloc(maxpages);
3433         if (!swap_map) {
3434                 error = -ENOMEM;
3435                 goto bad_swap_unlock_inode;
3436         }
3437
3438         error = swap_cgroup_swapon(si->type, maxpages);
3439         if (error)
3440                 goto bad_swap_unlock_inode;
3441
3442         nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
3443                                                 maxpages, &span);
3444         if (unlikely(nr_extents < 0)) {
3445                 error = nr_extents;
3446                 goto bad_swap_unlock_inode;
3447         }
3448
3449         /*
3450          * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
3451          * be above MAX_PAGE_ORDER incase of a large swap file.
3452          */
3453         zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
3454                                     GFP_KERNEL | __GFP_ZERO);
3455         if (!zeromap) {
3456                 error = -ENOMEM;
3457                 goto bad_swap_unlock_inode;
3458         }
3459
3460         if (si->bdev && bdev_stable_writes(si->bdev))
3461                 si->flags |= SWP_STABLE_WRITES;
3462
3463         if (si->bdev && bdev_synchronous(si->bdev))
3464                 si->flags |= SWP_SYNCHRONOUS_IO;
3465
3466         if (si->bdev && bdev_nonrot(si->bdev)) {
3467                 si->flags |= SWP_SOLIDSTATE;
3468
3469                 cluster_info = setup_clusters(si, swap_header, maxpages);
3470                 if (IS_ERR(cluster_info)) {
3471                         error = PTR_ERR(cluster_info);
3472                         cluster_info = NULL;
3473                         goto bad_swap_unlock_inode;
3474                 }
3475         } else {
3476                 atomic_inc(&nr_rotate_swap);
3477                 inced_nr_rotate_swap = true;
3478         }
3479
3480         if ((swap_flags & SWAP_FLAG_DISCARD) &&
3481             si->bdev && bdev_max_discard_sectors(si->bdev)) {
3482                 /*
3483                  * When discard is enabled for swap with no particular
3484                  * policy flagged, we set all swap discard flags here in
3485                  * order to sustain backward compatibility with older
3486                  * swapon(8) releases.
3487                  */
3488                 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3489                              SWP_PAGE_DISCARD);
3490
3491                 /*
3492                  * By flagging sys_swapon, a sysadmin can tell us to
3493                  * either do single-time area discards only, or to just
3494                  * perform discards for released swap page-clusters.
3495                  * Now it's time to adjust the p->flags accordingly.
3496                  */
3497                 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3498                         si->flags &= ~SWP_PAGE_DISCARD;
3499                 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3500                         si->flags &= ~SWP_AREA_DISCARD;
3501
3502                 /* issue a swapon-time discard if it's still required */
3503                 if (si->flags & SWP_AREA_DISCARD) {
3504                         int err = discard_swap(si);
3505                         if (unlikely(err))
3506                                 pr_err("swapon: discard_swap(%p): %d\n",
3507                                         si, err);
3508                 }
3509         }
3510
3511         error = init_swap_address_space(si->type, maxpages);
3512         if (error)
3513                 goto bad_swap_unlock_inode;
3514
3515         error = zswap_swapon(si->type, maxpages);
3516         if (error)
3517                 goto free_swap_address_space;
3518
3519         /*
3520          * Flush any pending IO and dirty mappings before we start using this
3521          * swap device.
3522          */
3523         inode->i_flags |= S_SWAPFILE;
3524         error = inode_drain_writes(inode);
3525         if (error) {
3526                 inode->i_flags &= ~S_SWAPFILE;
3527                 goto free_swap_zswap;
3528         }
3529
3530         mutex_lock(&swapon_mutex);
3531         prio = -1;
3532         if (swap_flags & SWAP_FLAG_PREFER)
3533                 prio =
3534                   (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3535         enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
3536
3537         pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s\n",
3538                 K(si->pages), name->name, si->prio, nr_extents,
3539                 K((unsigned long long)span),
3540                 (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
3541                 (si->flags & SWP_DISCARDABLE) ? "D" : "",
3542                 (si->flags & SWP_AREA_DISCARD) ? "s" : "",
3543                 (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
3544
3545         mutex_unlock(&swapon_mutex);
3546         atomic_inc(&proc_poll_event);
3547         wake_up_interruptible(&proc_poll_wait);
3548
3549         error = 0;
3550         goto out;
3551 free_swap_zswap:
3552         zswap_swapoff(si->type);
3553 free_swap_address_space:
3554         exit_swap_address_space(si->type);
3555 bad_swap_unlock_inode:
3556         inode_unlock(inode);
3557 bad_swap:
3558         free_percpu(si->percpu_cluster);
3559         si->percpu_cluster = NULL;
3560         free_percpu(si->cluster_next_cpu);
3561         si->cluster_next_cpu = NULL;
3562         inode = NULL;
3563         destroy_swap_extents(si);
3564         swap_cgroup_swapoff(si->type);
3565         spin_lock(&swap_lock);
3566         si->swap_file = NULL;
3567         si->flags = 0;
3568         spin_unlock(&swap_lock);
3569         vfree(swap_map);
3570         kvfree(zeromap);
3571         kvfree(cluster_info);
3572         if (inced_nr_rotate_swap)
3573                 atomic_dec(&nr_rotate_swap);
3574         if (swap_file)
3575                 filp_close(swap_file, NULL);
3576 out:
3577         if (!IS_ERR_OR_NULL(folio))
3578                 folio_release_kmap(folio, swap_header);
3579         if (name)
3580                 putname(name);
3581         if (inode)
3582                 inode_unlock(inode);
3583         if (!error)
3584                 enable_swap_slots_cache();
3585         return error;
3586 }
3587
3588 void si_swapinfo(struct sysinfo *val)
3589 {
3590         unsigned int type;
3591         unsigned long nr_to_be_unused = 0;
3592
3593         spin_lock(&swap_lock);
3594         for (type = 0; type < nr_swapfiles; type++) {
3595                 struct swap_info_struct *si = swap_info[type];
3596
3597                 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3598                         nr_to_be_unused += READ_ONCE(si->inuse_pages);
3599         }
3600         val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3601         val->totalswap = total_swap_pages + nr_to_be_unused;
3602         spin_unlock(&swap_lock);
3603 }
3604
3605 /*
3606  * Verify that nr swap entries are valid and increment their swap map counts.
3607  *
3608  * Returns error code in following case.
3609  * - success -> 0
3610  * - swp_entry is invalid -> EINVAL
3611  * - swp_entry is migration entry -> EINVAL
3612  * - swap-cache reference is requested but there is already one. -> EEXIST
3613  * - swap-cache reference is requested but the entry is not used. -> ENOENT
3614  * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3615  */
3616 static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
3617 {
3618         struct swap_info_struct *si;
3619         struct swap_cluster_info *ci;
3620         unsigned long offset;
3621         unsigned char count;
3622         unsigned char has_cache;
3623         int err, i;
3624
3625         si = swp_swap_info(entry);
3626
3627         offset = swp_offset(entry);
3628         VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
3629         VM_WARN_ON(usage == 1 && nr > 1);
3630         ci = lock_cluster_or_swap_info(si, offset);
3631
3632         err = 0;
3633         for (i = 0; i < nr; i++) {
3634                 count = si->swap_map[offset + i];
3635
3636                 /*
3637                  * swapin_readahead() doesn't check if a swap entry is valid, so the
3638                  * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3639                  */
3640                 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3641                         err = -ENOENT;
3642                         goto unlock_out;
3643                 }
3644
3645                 has_cache = count & SWAP_HAS_CACHE;
3646                 count &= ~SWAP_HAS_CACHE;
3647
3648                 if (!count && !has_cache) {
3649                         err = -ENOENT;
3650                 } else if (usage == SWAP_HAS_CACHE) {
3651                         if (has_cache)
3652                                 err = -EEXIST;
3653                 } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
3654                         err = -EINVAL;
3655                 }
3656
3657                 if (err)
3658                         goto unlock_out;
3659         }
3660
3661         for (i = 0; i < nr; i++) {
3662                 count = si->swap_map[offset + i];
3663                 has_cache = count & SWAP_HAS_CACHE;
3664                 count &= ~SWAP_HAS_CACHE;
3665
3666                 if (usage == SWAP_HAS_CACHE)
3667                         has_cache = SWAP_HAS_CACHE;
3668                 else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3669                         count += usage;
3670                 else if (swap_count_continued(si, offset + i, count))
3671                         count = COUNT_CONTINUED;
3672                 else {
3673                         /*
3674                          * Don't need to rollback changes, because if
3675                          * usage == 1, there must be nr == 1.
3676                          */
3677                         err = -ENOMEM;
3678                         goto unlock_out;
3679                 }
3680
3681                 WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
3682         }
3683
3684 unlock_out:
3685         unlock_cluster_or_swap_info(si, ci);
3686         return err;
3687 }
3688
3689 /*
3690  * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3691  * (in which case its reference count is never incremented).
3692  */
3693 void swap_shmem_alloc(swp_entry_t entry, int nr)
3694 {
3695         __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
3696 }
3697
3698 /*
3699  * Increase reference count of swap entry by 1.
3700  * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3701  * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3702  * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3703  * might occur if a page table entry has got corrupted.
3704  */
3705 int swap_duplicate(swp_entry_t entry)
3706 {
3707         int err = 0;
3708
3709         while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
3710                 err = add_swap_count_continuation(entry, GFP_ATOMIC);
3711         return err;
3712 }
3713
3714 /*
3715  * @entry: first swap entry from which we allocate nr swap cache.
3716  *
3717  * Called when allocating swap cache for existing swap entries,
3718  * This can return error codes. Returns 0 at success.
3719  * -EEXIST means there is a swap cache.
3720  * Note: return code is different from swap_duplicate().
3721  */
3722 int swapcache_prepare(swp_entry_t entry, int nr)
3723 {
3724         return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
3725 }
3726
3727 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
3728 {
3729         unsigned long offset = swp_offset(entry);
3730
3731         cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
3732 }
3733
3734 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3735 {
3736         return swap_type_to_swap_info(swp_type(entry));
3737 }
3738
3739 /*
3740  * out-of-line methods to avoid include hell.
3741  */
3742 struct address_space *swapcache_mapping(struct folio *folio)
3743 {
3744         return swp_swap_info(folio->swap)->swap_file->f_mapping;
3745 }
3746 EXPORT_SYMBOL_GPL(swapcache_mapping);
3747
3748 pgoff_t __folio_swap_cache_index(struct folio *folio)
3749 {
3750         return swap_cache_index(folio->swap);
3751 }
3752 EXPORT_SYMBOL_GPL(__folio_swap_cache_index);
3753
3754 /*
3755  * add_swap_count_continuation - called when a swap count is duplicated
3756  * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3757  * page of the original vmalloc'ed swap_map, to hold the continuation count
3758  * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3759  * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3760  *
3761  * These continuation pages are seldom referenced: the common paths all work
3762  * on the original swap_map, only referring to a continuation page when the
3763  * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3764  *
3765  * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3766  * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3767  * can be called after dropping locks.
3768  */
3769 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3770 {
3771         struct swap_info_struct *si;
3772         struct swap_cluster_info *ci;
3773         struct page *head;
3774         struct page *page;
3775         struct page *list_page;
3776         pgoff_t offset;
3777         unsigned char count;
3778         int ret = 0;
3779
3780         /*
3781          * When debugging, it's easier to use __GFP_ZERO here; but it's better
3782          * for latency not to zero a page while GFP_ATOMIC and holding locks.
3783          */
3784         page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3785
3786         si = get_swap_device(entry);
3787         if (!si) {
3788                 /*
3789                  * An acceptable race has occurred since the failing
3790                  * __swap_duplicate(): the swap device may be swapoff
3791                  */
3792                 goto outer;
3793         }
3794         spin_lock(&si->lock);
3795
3796         offset = swp_offset(entry);
3797
3798         ci = lock_cluster(si, offset);
3799
3800         count = swap_count(si->swap_map[offset]);
3801
3802         if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3803                 /*
3804                  * The higher the swap count, the more likely it is that tasks
3805                  * will race to add swap count continuation: we need to avoid
3806                  * over-provisioning.
3807                  */
3808                 goto out;
3809         }
3810
3811         if (!page) {
3812                 ret = -ENOMEM;
3813                 goto out;
3814         }
3815
3816         head = vmalloc_to_page(si->swap_map + offset);
3817         offset &= ~PAGE_MASK;
3818
3819         spin_lock(&si->cont_lock);
3820         /*
3821          * Page allocation does not initialize the page's lru field,
3822          * but it does always reset its private field.
3823          */
3824         if (!page_private(head)) {
3825                 BUG_ON(count & COUNT_CONTINUED);
3826                 INIT_LIST_HEAD(&head->lru);
3827                 set_page_private(head, SWP_CONTINUED);
3828                 si->flags |= SWP_CONTINUED;
3829         }
3830
3831         list_for_each_entry(list_page, &head->lru, lru) {
3832                 unsigned char *map;
3833
3834                 /*
3835                  * If the previous map said no continuation, but we've found
3836                  * a continuation page, free our allocation and use this one.
3837                  */
3838                 if (!(count & COUNT_CONTINUED))
3839                         goto out_unlock_cont;
3840
3841                 map = kmap_local_page(list_page) + offset;
3842                 count = *map;
3843                 kunmap_local(map);
3844
3845                 /*
3846                  * If this continuation count now has some space in it,
3847                  * free our allocation and use this one.
3848                  */
3849                 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3850                         goto out_unlock_cont;
3851         }
3852
3853         list_add_tail(&page->lru, &head->lru);
3854         page = NULL;                    /* now it's attached, don't free it */
3855 out_unlock_cont:
3856         spin_unlock(&si->cont_lock);
3857 out:
3858         unlock_cluster(ci);
3859         spin_unlock(&si->lock);
3860         put_swap_device(si);
3861 outer:
3862         if (page)
3863                 __free_page(page);
3864         return ret;
3865 }
3866
3867 /*
3868  * swap_count_continued - when the original swap_map count is incremented
3869  * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3870  * into, carry if so, or else fail until a new continuation page is allocated;
3871  * when the original swap_map count is decremented from 0 with continuation,
3872  * borrow from the continuation and report whether it still holds more.
3873  * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3874  * lock.
3875  */
3876 static bool swap_count_continued(struct swap_info_struct *si,
3877                                  pgoff_t offset, unsigned char count)
3878 {
3879         struct page *head;
3880         struct page *page;
3881         unsigned char *map;
3882         bool ret;
3883
3884         head = vmalloc_to_page(si->swap_map + offset);
3885         if (page_private(head) != SWP_CONTINUED) {
3886                 BUG_ON(count & COUNT_CONTINUED);
3887                 return false;           /* need to add count continuation */
3888         }
3889
3890         spin_lock(&si->cont_lock);
3891         offset &= ~PAGE_MASK;
3892         page = list_next_entry(head, lru);
3893         map = kmap_local_page(page) + offset;
3894
3895         if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
3896                 goto init_map;          /* jump over SWAP_CONT_MAX checks */
3897
3898         if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3899                 /*
3900                  * Think of how you add 1 to 999
3901                  */
3902                 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3903                         kunmap_local(map);
3904                         page = list_next_entry(page, lru);
3905                         BUG_ON(page == head);
3906                         map = kmap_local_page(page) + offset;
3907                 }
3908                 if (*map == SWAP_CONT_MAX) {
3909                         kunmap_local(map);
3910                         page = list_next_entry(page, lru);
3911                         if (page == head) {
3912                                 ret = false;    /* add count continuation */
3913                                 goto out;
3914                         }
3915                         map = kmap_local_page(page) + offset;
3916 init_map:               *map = 0;               /* we didn't zero the page */
3917                 }
3918                 *map += 1;
3919                 kunmap_local(map);
3920                 while ((page = list_prev_entry(page, lru)) != head) {
3921                         map = kmap_local_page(page) + offset;
3922                         *map = COUNT_CONTINUED;
3923                         kunmap_local(map);
3924                 }
3925                 ret = true;                     /* incremented */
3926
3927         } else {                                /* decrementing */
3928                 /*
3929                  * Think of how you subtract 1 from 1000
3930                  */
3931                 BUG_ON(count != COUNT_CONTINUED);
3932                 while (*map == COUNT_CONTINUED) {
3933                         kunmap_local(map);
3934                         page = list_next_entry(page, lru);
3935                         BUG_ON(page == head);
3936                         map = kmap_local_page(page) + offset;
3937                 }
3938                 BUG_ON(*map == 0);
3939                 *map -= 1;
3940                 if (*map == 0)
3941                         count = 0;
3942                 kunmap_local(map);
3943                 while ((page = list_prev_entry(page, lru)) != head) {
3944                         map = kmap_local_page(page) + offset;
3945                         *map = SWAP_CONT_MAX | count;
3946                         count = COUNT_CONTINUED;
3947                         kunmap_local(map);
3948                 }
3949                 ret = count == COUNT_CONTINUED;
3950         }
3951 out:
3952         spin_unlock(&si->cont_lock);
3953         return ret;
3954 }
3955
3956 /*
3957  * free_swap_count_continuations - swapoff free all the continuation pages
3958  * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3959  */
3960 static void free_swap_count_continuations(struct swap_info_struct *si)
3961 {
3962         pgoff_t offset;
3963
3964         for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3965                 struct page *head;
3966                 head = vmalloc_to_page(si->swap_map + offset);
3967                 if (page_private(head)) {
3968                         struct page *page, *next;
3969
3970                         list_for_each_entry_safe(page, next, &head->lru, lru) {
3971                                 list_del(&page->lru);
3972                                 __free_page(page);
3973                         }
3974                 }
3975         }
3976 }
3977
3978 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3979 void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
3980 {
3981         struct swap_info_struct *si, *next;
3982         int nid = folio_nid(folio);
3983
3984         if (!(gfp & __GFP_IO))
3985                 return;
3986
3987         if (!__has_usable_swap())
3988                 return;
3989
3990         if (!blk_cgroup_congested())
3991                 return;
3992
3993         /*
3994          * We've already scheduled a throttle, avoid taking the global swap
3995          * lock.
3996          */
3997         if (current->throttle_disk)
3998                 return;
3999
4000         spin_lock(&swap_avail_lock);
4001         plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
4002                                   avail_lists[nid]) {
4003                 if (si->bdev) {
4004                         blkcg_schedule_throttle(si->bdev->bd_disk, true);
4005                         break;
4006                 }
4007         }
4008         spin_unlock(&swap_avail_lock);
4009 }
4010 #endif
4011
4012 static int __init swapfile_init(void)
4013 {
4014         int nid;
4015
4016         swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
4017                                          GFP_KERNEL);
4018         if (!swap_avail_heads) {
4019                 pr_emerg("Not enough memory for swap heads, swap is disabled\n");
4020                 return -ENOMEM;
4021         }
4022
4023         for_each_node(nid)
4024                 plist_head_init(&swap_avail_heads[nid]);
4025
4026         swapfile_maximum_size = arch_max_swapfile_size();
4027
4028 #ifdef CONFIG_MIGRATION
4029         if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
4030                 swap_migration_ad_supported = true;
4031 #endif  /* CONFIG_MIGRATION */
4032
4033         return 0;
4034 }
4035 subsys_initcall(swapfile_init);
This page took 0.25303 seconds and 4 git commands to generate.