/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for an overview of what HMM is and what it is
 * for. Here we focus on the HMM API description, with some explanation of
 * the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page tables of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table
 * update happens first, and then the update is mirrored over to the device
 * page table. This does not cause any issue, because the CPU page table
 * cannot start pointing to a new page until the device page table is
 * invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also
 * provides some API calls to help with taking a snapshot of the CPU page
 * table, and to synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can
 * always cause the device memory to be migrated (copied/moved) back to
 * regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H
#include <linux/kconfig.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HMM_MIRROR

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>
/*
 * struct hmm - HMM per mm struct
 *
 * @mmu_notifier: mmu notifier to track updates to the CPU page table; it also
 *     holds the mm_struct this HMM struct is bound to
 * @ranges_lock: lock protecting the ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for users waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 */
struct hmm {
	struct mmu_notifier	mmu_notifier;
	spinlock_t		ranges_lock;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct rw_semaphore	mirrors_sem;
	wait_queue_head_t	wq;
	long			notifiers;
};
/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array for mapping page protections to device
 * PTE bits. If the driver valid bit for an entry is bit 3,
 * i.e., (entry & (1 << 3)), then the driver must provide
 * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
 * The same logic applies to all flags. This is the same idea as vm_page_prot
 * in a vma, except that this is per device driver rather than per
 * architecture. See the illustrative flags array sketched below the enum.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};
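/*
 * For example, a driver whose device PTEs keep valid in bit 0, write in
 * bit 1, and device-private in bit 2 could describe them as below. This is
 * an illustrative sketch only; the bit assignments are hypothetical and
 * entirely up to the driver.
 *
 *	static const uint64_t mydevice_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID] = 1UL << 0,
 *		[HMM_PFN_WRITE] = 1UL << 1,
 *		[HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
 *	};
 */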
/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 *      result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should
 *      not be mirrored by a device, because the entry will never have
 *      HMM_PFN_VALID set and the pfn value is undefined.
 *
 * The driver provides values for the none entry, the error entry, and the
 * special entry. The driver can alias (i.e., use the same value for) error
 * and special, but it should not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_range_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
 * An illustrative values array is sketched below the enum.
 */
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};
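/*
 * For example, a driver could reserve encodings that can never be produced
 * by hmm_device_entry_from_pfn() for the special values. The constants below
 * are purely illustrative; any values work as long as none is not aliased
 * with error or special.
 *
 *	static const uint64_t mydevice_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_ERROR] = 0xfffffffffffffffeUL,
 *		[HMM_PFN_NONE] = 0,
 *		[HMM_PFN_SPECIAL] = 0xfffffffffffffffcUL,
 *	};
 */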
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @list: all active ranges are on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn value for some special case (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it was filled by an HMM function
 */
struct hmm_range {
	struct hmm		*hmm;
	struct list_head	list;
	unsigned long		start;
	unsigned long		end;
	uint64_t		*pfns;
	const uint64_t		*flags;
	const uint64_t		*values;
	uint64_t		default_flags;
	uint64_t		pfn_flags_mask;
	uint8_t			pfn_shift;
	bool			valid;
};
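/*
 * A minimal initialization sketch, reusing the hypothetical mydevice_flags
 * and mydevice_values arrays from above (a pfn_shift of 3 leaves the three
 * low bits of each entry free for the flag bits those arrays use):
 *
 *	struct hmm_range range = {
 *		.start = addr,				// page aligned
 *		.end = addr + (npages << PAGE_SHIFT),
 *		.pfns = pfns,				// npages uint64_t entries
 *		.flags = mydevice_flags,
 *		.values = mydevice_values,
 *		.pfn_shift = 3,
 *	};
 */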
/*
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: timeout for wait in ms (i.e., abort wait after that period)
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
					      unsigned long timeout)
{
	return wait_event_timeout(range->hmm->wq, range->valid,
				  msecs_to_jiffies(timeout)) != 0;
}
/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range to test
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
	return range->valid;
}
/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry value to get the corresponding struct page from
 * Return: struct page pointer if entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., the valid flag is set) then return the
 * struct page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
						    uint64_t entry)
{
	if (entry == range->values[HMM_PFN_NONE])
		return NULL;
	if (entry == range->values[HMM_PFN_ERROR])
		return NULL;
	if (entry == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(entry & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(entry >> range->pfn_shift);
}
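/*
 * Typical use is to walk the pfns array after a successful snapshot or fault
 * and resolve each entry to a page. An illustrative sketch only (the loop
 * and the surrounding names are hypothetical):
 *
 *	for (i = 0; i < (range.end - range.start) >> PAGE_SHIFT; i++) {
 *		struct page *page;
 *
 *		page = hmm_device_entry_to_page(&range, range.pfns[i]);
 *		if (!page)
 *			continue; // none, error, or special entry
 *		// program the device page table with page
 *	}
 */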
/*
 * hmm_device_entry_to_pfn() - return the pfn value stored in a device entry
 * @range: range used to decode the device entry value
 * @pfn: device entry to extract the pfn from
 * Return: pfn value if the device entry is valid, -1UL otherwise
 */
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return -1UL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return -1UL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (pfn >> range->pfn_shift);
}
/*
 * hmm_device_entry_from_page() - create a valid device entry for a page
 * @range: range used to encode the HMM pfn value
 * @page: page for which to create the device entry
 * Return: valid device entry for the page
 */
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
						  struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}
/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
						 unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}
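/*
 * Encoding and decoding are symmetric. An illustration-only round trip
 * (pfn and range are assumed to be in scope):
 *
 *	uint64_t entry = hmm_device_entry_from_pfn(&range, pfn);
 *	// entry now has the driver's HMM_PFN_VALID flag bit set
 *	WARN_ON(hmm_device_entry_to_pfn(&range, entry) != pfn);
 */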
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can
 * either directly use mmu_notifier APIs or they can use the hmm_mirror API.
 * Device drivers can decide to register one mirror per device per process,
 * or just one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          das->mirror.ops = &device_mirror_ops;
 *          ret = hmm_mirror_register(&das->mirror, mm);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror
 * struct before calling hmm_mirror_unregister(). The expected usage is to do
 * that when the device driver is unbinding from an address space.
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */
/*
 * struct hmm_mirror_ops - HMM mirror device operations callback
 *
 * @release: callback invoked when the mm_struct is being released
 * @sync_cpu_device_pagetables: callback to update a range on a device
 */
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released. The callback
	 * must ensure that all access to any pages obtained from this mirror
	 * is halted before the callback returns. All future access should
	 * fault.
	 */
	void (*release)(struct hmm_mirror *mirror);

	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct mmu_notifier_range)
	 * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
	 * and the callback needs to block, 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform.
	 *
	 * The device driver must not return from this callback until the
	 * device page tables are completely updated (TLBs flushed, etc.);
	 * this is a synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(
		struct hmm_mirror *mirror,
		const struct mmu_notifier_range *update);
};
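/*
 * A minimal sketch of a driver callback, assuming hypothetical mydevice_*
 * helpers that invalidate the device page table for the given span and flush
 * device TLBs before returning:
 *
 *	static int mydevice_sync_cpu_device_pagetables(
 *			struct hmm_mirror *mirror,
 *			const struct mmu_notifier_range *update)
 *	{
 *		struct mydevice *dev = container_of(mirror, struct mydevice,
 *						    mirror);
 *
 *		if (!mmu_notifier_range_blockable(update) &&
 *		    mydevice_would_block(dev))
 *			return -EAGAIN;
 *
 *		// Invalidate the device page table for the range and flush
 *		// device TLBs before returning; this call is synchronous.
 *		mydevice_invalidate_range(dev, update->start, update->end);
 *		return 0;
 *	}
 *
 *	static const struct hmm_mirror_ops device_mirror_ops = {
 *		.release = mydevice_release,
 *		.sync_cpu_device_pagetables =
 *			mydevice_sync_cpu_device_pagetables,
 *	};
 */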
/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register
 * one instance of an hmm_mirror struct with HMM. HMM will track the list of
 * all mirrors for each mm_struct.
 */
struct hmm_mirror {
	struct hmm			*hmm;
	const struct hmm_mirror_ops	*ops;
	struct list_head		list;
};
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
void hmm_range_unregister(struct hmm_range *range);
/*
 * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
 */
#define HMM_FAULT_ALLOW_RETRY		(1 << 0)

/* Don't fault in missing PTEs, just snapshot the current state. */
#define HMM_FAULT_SNAPSHOT		(1 << 1)

long hmm_range_fault(struct hmm_range *range, unsigned int flags);
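/*
 * A condensed sketch of the expected fault-and-snapshot loop (see
 * Documentation/vm/hmm.rst for the full pattern; the driver page table lock
 * and the unconditional retry are illustrative assumptions):
 *
 *	hmm_range_register(&range, mirror);
 * again:
 *	hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, 0);
 *	up_read(&mm->mmap_sem);
 *	if (ret < 0) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *
 *	take_driver_page_table_lock();
 *	if (!hmm_range_valid(&range)) {
 *		release_driver_page_table_lock();
 *		goto again;
 *	}
 *	// Use range.pfns to update the device page table here.
 *	hmm_range_unregister(&range);
 *	release_driver_page_table_lock();
 */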
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       unsigned int flags);
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty);
/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms (i.e., 1s) is already a long time
 * to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
#endif /* CONFIG_HMM_MIRROR */

#endif /* LINUX_HMM_H */