/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct sg_page_iter sg_iter;
	struct page *page;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
				DMA_BIDIRECTIONAL);

	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
		page = sg_page_iter_page(&sg_iter);
		if (!PageDirty(page) && umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
 *
 * sg: current scatterlist entry
 * page_list: array of npages struct page pointers
 * npages: number of pages in page_list
 * max_seg_sz: maximum segment size in bytes
 * nents: [out] number of entries in the scatterlist
 *
 * Return new end of scatterlist
 */
static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
						struct page **page_list,
						unsigned long npages,
						unsigned int max_seg_sz,
						int *nents)
{
	unsigned long first_pfn;
	unsigned long i = 0;
	bool update_cur_sg = false;
	bool first = !sg_page(sg);

	/* Check if new page_list is contiguous with end of previous page_list.
	 * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
	 */
	if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
		       page_to_pfn(page_list[0])))
		update_cur_sg = true;

	while (i != npages) {
		unsigned long len;
		struct page *first_page = page_list[i];

		first_pfn = page_to_pfn(first_page);

		/* Compute the number of contiguous pages we have starting
		 * at i
		 */
		for (len = 0; i != npages &&
			      first_pfn + len == page_to_pfn(page_list[i]) &&
			      len < (max_seg_sz >> PAGE_SHIFT);
		     len++)
			i++;

		/* Squash N contiguous pages from page_list into current sge */
		if (update_cur_sg) {
			if ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT)) {
				sg_set_page(sg, sg_page(sg),
					    sg->length + (len << PAGE_SHIFT),
					    0);
				update_cur_sg = false;
				continue;
			}
			update_cur_sg = false;
		}

		/* Squash N contiguous pages into next sge or first sge */
		if (!first)
			sg = sg_next(sg);

		(*nents)++;
		sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
		first = false;
	}

	return sg;
}

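/*
 * Example of the coalescing above (illustrative, assuming PAGE_SIZE = 4K
 * and max_seg_sz = 64K): a page_list whose pages have PFNs
 * { 100, 101, 102, 200 } is folded into two SGEs, a 12K entry covering
 * PFNs 100-102 and a 4K entry for PFN 200. A subsequent call whose first
 * PFN is 201 would then extend that second SGE instead of opening a new
 * one, because of the contiguity check at the top of the function.
 */
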
/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that support multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers always supporting PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned int best_pg_bit;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	/* At minimum, drivers must support PAGE_SIZE or smaller */
	if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
		return 0;

	va = virt;
	/* max page size not to exceed MR length */
	mask = roundup_pow_of_two(umem->length);
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		if (i && i != (umem->nmap - 1))
			/* restrict by length as well for interior SGEs */
			mask |= sg_dma_len(sg);
		va += sg_dma_len(sg) - pgoff;
		/* only the first SGE can start at a non-zero page offset */
		pgoff = 0;
	}

	best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);

	return BIT_ULL(best_pg_bit);
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);

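/*
 * Example use (illustrative sketch; mr and virt_addr are hypothetical
 * driver state): a device supporting 4K, 2M and 1G MR page sizes would
 * pass the corresponding bitmap and its IOVA after mapping the umem:
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G,
 *				      virt_addr);
 *	if (!pgsz)
 *		return -EOPNOTSUPP;
 *	mr->page_shift = order_base_2(pgsz);
 *
 * The mask accumulation works because page sizes are powers of two: any
 * bit that differs between an IOVA and its DMA address, or any bit set
 * in an interior SGE length, rules out every page size above that bit.
 */
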
/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If access flags indicate ODP memory, avoid pinning. Instead, store
 * the mm for future page fault handling in conjunction with MMU notifiers.
 *
 * @udata: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_ucontext *context;
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	struct mm_struct *mm;
	unsigned long npages;
	int ret;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg;
	unsigned int gup_flags = FOLL_WRITE;

	if (!udata)
		return ERR_PTR(-EIO);

	context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
			  ->context;
	if (!context)
		return ERR_PTR(-EIO);

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND) {
		umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
		umem->is_odp = 1;
	} else {
		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
	}

	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	if (access & IB_ACCESS_ON_DEMAND) {
		if (WARN_ON_ONCE(!context->invalidate_range)) {
			ret = -EINVAL;
			goto umem_kfree;
		}

		ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
		if (ret)
			goto umem_kfree;
		return umem;
	}

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto vma;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	sg = umem->sg_head.sgl;

	while (npages) {
		down_read(&mm->mmap_sem);
		ret = get_user_pages(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags | FOLL_LONGTERM,
				     page_list, NULL);
		if (ret < 0) {
			up_read(&mm->mmap_sem);
			goto umem_release;
		}

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		sg = ib_umem_add_sg_table(sg, page_list, ret,
			dma_get_max_seg_size(context->device->dma_device),
			&umem->sg_nents);

		up_read(&mm->mmap_sem);
	}

	sg_mark_end(sg);

	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->sg_nents,
					 DMA_BIDIRECTIONAL,
					 dma_attrs);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
	__ib_umem_release(context->device, umem, 0);
vma:
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);

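/*
 * Example use (illustrative sketch of a driver's reg_user_mr path; mr
 * and its fields are hypothetical driver state):
 *
 *	umem = ib_umem_get(udata, start, length, access_flags, 0);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *	mr->umem = umem;
 *
 * with the matching ib_umem_release(mr->umem) in the dereg_mr path.
 */
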
static void __ib_umem_release_tail(struct ib_umem *umem)
{
	mmdrop(umem->owning_mm);
	if (umem->is_odp)
		kfree(to_ib_umem_odp(umem));
	else
		kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (umem->is_odp) {
		ib_umem_odp_release(to_ib_umem_odp(umem));
		__ib_umem_release_tail(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	__ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i, n = 0;

	if (umem->is_odp)
		return ib_umem_num_pages(umem);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> umem->page_shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

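/*
 * Example use (illustrative sketch; mr->pages is a hypothetical HW page
 * table): drivers commonly size per-page metadata from this count:
 *
 *	int npages = ib_umem_page_count(mr->umem);
 *
 *	mr->pages = kcalloc(npages, sizeof(u64), GFP_KERNEL);
 */
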
/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
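
/*
 * Example use (illustrative sketch; struct my_hdr is a hypothetical
 * layout of the start of the user buffer):
 *
 *	struct my_hdr hdr;
 *	int err;
 *
 *	err = ib_umem_copy_from(&hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */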