/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>

#include "mlx5_ib.h"
#include "cmd.h"

#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT

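/*
 * A quick sanity check of the geometry above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12):
 *
 *   MLX5_IMR_MTT_BITS    = 30 - 12 = 18
 *   MLX5_IMR_MTT_SHIFT   = 18 + 12 = 30
 *   MLX5_IMR_MTT_ENTRIES = 2^18    = 256K page entries per leaf
 *   MLX5_IMR_MTT_SIZE    = 2^30    = 1 GiB of virtual address space per leaf
 *
 * So each leaf MTT mkey of an implicit MR covers one 1 GiB slot, and the
 * parent KSM mkey uses MLX5_KSM_PAGE_SHIFT (30) as its "page" size, i.e.
 * one KSM entry per slot.
 */
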
static u64 mlx5_imr_ksm_entries;

static int check_parent(struct ib_umem_odp *odp,
                        struct mlx5_ib_mr *parent)
{
        struct mlx5_ib_mr *mr = odp->private;

        return mr && mr->parent == parent && !odp->dying;
}

struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
{
        if (WARN_ON(!mr || !mr->umem || !mr->umem->is_odp))
                return NULL;

        return to_ib_umem_odp(mr->umem)->per_mm;
}

static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
{
        struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
        struct ib_ucontext_per_mm *per_mm = odp->per_mm;
        struct rb_node *rb;

        down_read(&per_mm->umem_rwsem);
        while (1) {
                rb = rb_next(&odp->interval_tree.rb);
                if (!rb)
                        goto not_found;
                odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
                if (check_parent(odp, parent))
                        goto end;
        }
not_found:
        odp = NULL;
end:
        up_read(&per_mm->umem_rwsem);
        return odp;
}

static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
                                      struct mlx5_ib_mr *parent)
{
        struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
        struct ib_umem_odp *odp;
        struct rb_node *rb;

        down_read(&per_mm->umem_rwsem);
        odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
        if (!odp)
                goto end;

        while (1) {
                if (check_parent(odp, parent))
                        goto end;
                rb = rb_next(&odp->interval_tree.rb);
                if (!rb)
                        goto not_found;
                odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
                if (ib_umem_start(&odp->umem) > start + length)
                        goto not_found;
        }
not_found:
        odp = NULL;
end:
        up_read(&per_mm->umem_rwsem);
        return odp;
}
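
/*
 * Both odp_lookup() and odp_next() walk the per-mm interval tree, which holds
 * every ODP umem registered by the process, not only the children of one
 * implicit MR.  check_parent() filters the walk down to the leaves ("MTT
 * children") of the given parent and skips leaves that are already being torn
 * down (odp->dying).
 */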

void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
                           size_t nentries, struct mlx5_ib_mr *mr, int flags)
{
        struct ib_pd *pd = mr->ibmr.pd;
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct ib_umem_odp *odp;
        unsigned long va;
        int i;

        if (flags & MLX5_IB_UPD_XLT_ZAP) {
                for (i = 0; i < nentries; i++, pklm++) {
                        pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
                        pklm->key = cpu_to_be32(dev->null_mkey);
                        pklm->va = 0;
                }
                return;
        }

        odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
                         nentries * MLX5_IMR_MTT_SIZE, mr);

        for (i = 0; i < nentries; i++, pklm++) {
                pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
                va = (offset + i) * MLX5_IMR_MTT_SIZE;
                if (odp && odp->umem.address == va) {
                        struct mlx5_ib_mr *mtt = odp->private;

                        pklm->key = cpu_to_be32(mtt->ibmr.lkey);
                        odp = odp_next(odp);
                } else {
                        pklm->key = cpu_to_be32(dev->null_mkey);
                }
                mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
                            i, va, be32_to_cpu(pklm->key));
        }
}
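
/*
 * A worked example of what the loop above produces, assuming 4 KiB pages
 * (so each KLM slot spans 1 GiB) and a call with offset == 2, nentries == 3
 * where only the 3 GiB..4 GiB slot currently has a child MTT mkey:
 *
 *   pklm[0]: va = 2 GiB, bcount = 1 GiB, key = null_mkey
 *   pklm[1]: va = 3 GiB, bcount = 1 GiB, key = lkey of the child MTT mkey
 *   pklm[2]: va = 4 GiB, bcount = 1 GiB, key = null_mkey
 *
 * Slots with no backing child point at the device's null mkey, so an access
 * there is reported as a page fault rather than hitting stale translations.
 */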

static void mr_leaf_free_action(struct work_struct *work)
{
        struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
        int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT;
        struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;

        mr->parent = NULL;
        synchronize_srcu(&mr->dev->mr_srcu);

        ib_umem_release(&odp->umem);
        if (imr->live)
                mlx5_ib_update_xlt(imr, idx, 1, 0,
                                   MLX5_IB_UPD_XLT_INDIRECT |
                                   MLX5_IB_UPD_XLT_ATOMIC);
        mlx5_mr_cache_free(mr->dev, mr);

        if (atomic_dec_and_test(&imr->num_leaf_free))
                wake_up(&imr->q_leaf_free);
}

void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
                              unsigned long end)
{
        struct mlx5_ib_mr *mr;
        const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
                                    sizeof(struct mlx5_mtt)) - 1;
        u64 idx = 0, blk_start_idx = 0;
        struct ib_umem *umem;
        int in_block = 0;
        u64 addr;

        if (!umem_odp) {
                pr_err("invalidation called on NULL umem or non-ODP umem\n");
                return;
        }
        umem = &umem_odp->umem;

        mr = umem_odp->private;

        if (!mr || !mr->ibmr.pd)
                return;

        start = max_t(u64, ib_umem_start(umem), start);
        end = min_t(u64, ib_umem_end(umem), end);

        /*
         * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
         * while we are doing the invalidation, no page fault will attempt to
         * overwrite the same MTTs. Concurrent invalidations might race us,
         * but they will write 0s as well, so no difference in the end result.
         */

        for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
                /*
                 * Strive to write the MTTs in chunks, but avoid overwriting
                 * non-existing MTTs. The heuristic here can be improved to
                 * estimate the cost of another UMR vs. the cost of a bigger
                 * UMR.
                 */
                if (umem_odp->dma_list[idx] &
                    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
                        if (!in_block) {
                                blk_start_idx = idx;
                                in_block = 1;
                        }
                } else {
                        u64 umr_offset = idx & umr_block_mask;

                        if (in_block && umr_offset == 0) {
                                mlx5_ib_update_xlt(mr, blk_start_idx,
                                                   idx - blk_start_idx, 0,
                                                   MLX5_IB_UPD_XLT_ZAP |
                                                   MLX5_IB_UPD_XLT_ATOMIC);
                                in_block = 0;
                        }
                }
        }
        if (in_block)
                mlx5_ib_update_xlt(mr, blk_start_idx,
                                   idx - blk_start_idx + 1, 0,
                                   MLX5_IB_UPD_XLT_ZAP |
                                   MLX5_IB_UPD_XLT_ATOMIC);
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
         * needed.
         */

        ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

        if (unlikely(!umem->npages && mr->parent &&
                     !umem_odp->dying)) {
                WRITE_ONCE(umem_odp->dying, 1);
                atomic_inc(&mr->parent->num_leaf_free);
                schedule_work(&umem_odp->work);
        }
}
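
/*
 * The zap loop above batches MTT updates: it opens a block at the first
 * still-mapped page it sees, closes and flushes the block with a single UMR
 * when it hits a never-mapped entry at a block-aligned index, and flushes any
 * block left open once more after the loop.  With the usual values (64-byte
 * UMR translation alignment and 8-byte MTT entries - an assumption, check
 * MLX5_UMR_MTT_ALIGNMENT and struct mlx5_mtt) this means invalidation is done
 * in multiples of 8 MTT entries rather than one UMR per page.
 */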

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
        struct ib_odp_caps *caps = &dev->odp_caps;

        memset(caps, 0, sizeof(*caps));

        if (!MLX5_CAP_GEN(dev->mdev, pg))
                return;

        caps->general_caps = IB_ODP_SUPPORT;

        if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
                dev->odp_max_size = U64_MAX;
        else
                dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

        if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

        if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
            MLX5_CAP_GEN(dev->mdev, null_mkey) &&
            MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
                caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;

        return;
}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
                                      struct mlx5_pagefault *pfault,
                                      int error)
{
        int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
                     pfault->wqe.wq_num : pfault->token;
        int ret = mlx5_core_page_fault_resume(dev->mdev,
                                              pfault->token,
                                              wq_num,
                                              pfault->type,
                                              error);
        if (ret)
                mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
                            wq_num);
}

static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
                                            struct ib_umem *umem,
                                            bool ksm, int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr;
        int err;

        mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
                                            MLX5_IMR_MTT_CACHE_ENTRY);

        if (IS_ERR(mr))
                return mr;

        mr->ibmr.pd = pd;

        mr->dev = dev;
        mr->access_flags = access_flags;
        mr->mmkey.iova = 0;
        mr->umem = umem;

        if (ksm) {
                err = mlx5_ib_update_xlt(mr, 0,
                                         mlx5_imr_ksm_entries,
                                         MLX5_KSM_PAGE_SHIFT,
                                         MLX5_IB_UPD_XLT_INDIRECT |
                                         MLX5_IB_UPD_XLT_ZAP |
                                         MLX5_IB_UPD_XLT_ENABLE);

        } else {
                err = mlx5_ib_update_xlt(mr, 0,
                                         MLX5_IMR_MTT_ENTRIES,
                                         PAGE_SHIFT,
                                         MLX5_IB_UPD_XLT_ZAP |
                                         MLX5_IB_UPD_XLT_ENABLE |
                                         MLX5_IB_UPD_XLT_ATOMIC);
        }

        if (err)
                goto fail;

        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;

        mr->live = 1;

        mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
                    mr->mmkey.key, dev->mdev, mr);

        return mr;

fail:
        mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
        mlx5_mr_cache_free(dev, mr);

        return ERR_PTR(err);
}
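
/*
 * implicit_mr_alloc() serves both halves of an implicit MR:
 *
 *  - ksm == true:  the parent mkey, an indirect KSM whose entries each
 *    describe one MLX5_IMR_MTT_SIZE slot of the address space and start out
 *    zapped (pointing at the null mkey);
 *  - ksm == false: a leaf MTT mkey created on demand by
 *    implicit_mr_get_data() to back a single slot with real page
 *    translations.
 *
 * Both come from the MR cache, so freeing a leaf simply returns it there.
 */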

static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
                                                u64 io_virt, size_t bcnt)
{
        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
        struct ib_umem_odp *odp, *result = NULL;
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        u64 addr = io_virt & MLX5_IMR_MTT_MASK;
        int nentries = 0, start_idx = 0, ret;
        struct mlx5_ib_mr *mtt;

        mutex_lock(&odp_mr->umem_mutex);
        odp = odp_lookup(addr, 1, mr);

        mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
                    io_virt, bcnt, addr, odp);

next_mr:
        if (likely(odp)) {
                if (nentries)
                        nentries++;
        } else {
                odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
                                        MLX5_IMR_MTT_SIZE);
                if (IS_ERR(odp)) {
                        mutex_unlock(&odp_mr->umem_mutex);
                        return ERR_CAST(odp);
                }

                mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
                                        mr->access_flags);
                if (IS_ERR(mtt)) {
                        mutex_unlock(&odp_mr->umem_mutex);
                        ib_umem_release(&odp->umem);
                        return ERR_CAST(mtt);
                }

                odp->private = mtt;
                mtt->umem = &odp->umem;
                mtt->mmkey.iova = addr;
                mtt->parent = mr;
                INIT_WORK(&odp->work, mr_leaf_free_action);

                if (!nentries)
                        start_idx = addr >> MLX5_IMR_MTT_SHIFT;
                nentries++;
        }

        /* Return first odp if region not covered by single one */
        if (likely(!result))
                result = odp;

        addr += MLX5_IMR_MTT_SIZE;
        if (unlikely(addr < io_virt + bcnt)) {
                odp = odp_next(odp);
                if (odp && odp->umem.address != addr)
                        odp = NULL;
                goto next_mr;
        }

        if (unlikely(nentries)) {
                ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
                                         MLX5_IB_UPD_XLT_INDIRECT |
                                         MLX5_IB_UPD_XLT_ATOMIC);
                if (ret) {
                        mlx5_ib_err(dev, "Failed to update PAS\n");
                        result = ERR_PTR(ret);
                }
        }

        mutex_unlock(&odp_mr->umem_mutex);
        return result;
}
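
/*
 * In other words, implicit_mr_get_data() walks [io_virt, io_virt + bcnt) one
 * MLX5_IMR_MTT_SIZE slot at a time, creating a leaf umem plus MTT mkey for
 * every slot that does not have one yet, and then refreshes the parent KSM
 * entries start_idx .. start_idx + nentries - 1 so the new leaves become
 * visible to the HCA.  The caller gets back the leaf covering io_virt and
 * continues the fault handling from there.
 */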

struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
                                             int access_flags)
{
        struct ib_ucontext *ctx = pd->ibpd.uobject->context;
        struct mlx5_ib_mr *imr;
        struct ib_umem *umem;

        umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0);
        if (IS_ERR(umem))
                return ERR_CAST(umem);

        imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
        if (IS_ERR(imr)) {
                ib_umem_release(umem);
                return ERR_CAST(imr);
        }

        imr->umem = umem;
        init_waitqueue_head(&imr->q_leaf_free);
        atomic_set(&imr->num_leaf_free, 0);

        return imr;
}

static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
                        void *cookie)
{
        struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
        struct ib_umem *umem = &umem_odp->umem;

        if (mr->parent != imr)
                return 0;

        ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
                                    ib_umem_end(umem));

        if (umem_odp->dying)
                return 0;

        WRITE_ONCE(umem_odp->dying, 1);
        atomic_inc(&imr->num_leaf_free);
        schedule_work(&umem_odp->work);

        return 0;
}

void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
        struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);

        down_read(&per_mm->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
                                      mr_leaf_free, true, imr);
        up_read(&per_mm->umem_rwsem);

        wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
}

static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                        u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        u64 access_mask = ODP_READ_ALLOWED_BIT;
        int npages = 0, page_shift, np;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
        int current_seq;
        size_t size;
        int ret;

        if (!odp_mr->page_list) {
                odp = implicit_mr_get_data(mr, io_virt, bcnt);

                if (IS_ERR(odp))
                        return PTR_ERR(odp);
                mr = odp->private;

        } else {
                odp = odp_mr;
        }

next_mr:
        size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt);

        page_shift = mr->umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;

        if (mr->umem->writable)
                access_mask |= ODP_WRITE_ALLOWED_BIT;

        current_seq = READ_ONCE(odp->notifiers_seq);
        /*
         * Ensure the sequence number is valid for some time before we call
         * gup.
         */
        smp_rmb();

        ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
                                        access_mask, current_seq);

        if (ret < 0)
                goto out;

        np = ret;

        mutex_lock(&odp->umem_mutex);
        if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
                                        current_seq)) {
                /*
                 * No need to check whether the MTTs really belong to
                 * this MR, since ib_umem_odp_map_dma_pages already
                 * checks this.
                 */
                ret = mlx5_ib_update_xlt(mr, start_idx, np,
                                         page_shift, MLX5_IB_UPD_XLT_ATOMIC);
        } else {
                ret = -EAGAIN;
        }
        mutex_unlock(&odp->umem_mutex);

        if (ret < 0) {
                if (ret != -EAGAIN)
                        mlx5_ib_err(dev, "Failed to update mkey page tables\n");
                goto out;
        }

        if (bytes_mapped) {
                u32 new_mappings = (np << page_shift) -
                        (io_virt - round_down(io_virt, 1 << page_shift));
                *bytes_mapped += min_t(u32, new_mappings, size);
        }

        npages += np << (page_shift - PAGE_SHIFT);
        bcnt -= size;

        if (unlikely(bcnt)) {
                struct ib_umem_odp *next;

                io_virt += size;
                next = odp_next(odp);
                if (unlikely(!next || next->umem.address != io_virt)) {
                        mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
                                    io_virt, next);
                        return -EAGAIN;
                }
                odp = next;
                mr = odp->private;
                goto next_mr;
        }

        return npages;

out:
        if (ret == -EAGAIN) {
                if (mr->parent || !odp->dying) {
                        unsigned long timeout =
                                msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

                        if (!wait_for_completion_timeout(
                                        &odp->notifier_completion,
                                        timeout)) {
                                mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d\n",
                                             current_seq, odp->notifiers_seq);
                        }
                } else {
                        /* The MR is being killed, kill the QP as well. */
                        ret = -EFAULT;
                }
        }

        return ret;
}
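
/*
 * The bytes_mapped accounting above only credits bytes starting at io_virt.
 * For example, assuming 4 KiB pages, io_virt = 0x1300 and np = 2 mapped
 * pages: np << page_shift is 0x2000, the fault offset inside the first page
 * (io_virt - round_down(io_virt, 4 KiB)) is 0x300, so new_mappings = 0x1d00,
 * which is then clamped by min() to the segment size actually requested.
 */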

struct pf_frame {
        struct pf_frame *next;
        u32 key;
        u64 io_virt;
        size_t bcnt;
        int depth;
};
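
/*
 * pf_frame is a node in a manually managed stack: when a data segment points
 * at an indirect mkey (a memory window described by KLMs),
 * pagefault_single_data_segment() pushes one frame per KLM entry it still has
 * to resolve and loops back to next_mr instead of recursing, with
 * frame->depth guarding against chains deeper than the device's
 * max_indirection capability.
 */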

/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of OS pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
                                         u32 key, u64 io_virt, size_t bcnt,
                                         u32 *bytes_committed,
                                         u32 *bytes_mapped)
{
        int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
        struct pf_frame *head = NULL, *frame;
        struct mlx5_core_mkey *mmkey;
        struct mlx5_ib_mw *mw;
        struct mlx5_ib_mr *mr;
        struct mlx5_klm *pklm;
        u32 *out = NULL;
        size_t offset;

        srcu_key = srcu_read_lock(&dev->mr_srcu);

        io_virt += *bytes_committed;
        bcnt -= *bytes_committed;

next_mr:
        mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
        if (!mmkey || mmkey->key != key) {
                mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
                ret = -EFAULT;
                goto srcu_unlock;
        }

        switch (mmkey->type) {
        case MLX5_MKEY_MR:
                mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
                if (!mr->live || !mr->ibmr.pd) {
                        mlx5_ib_dbg(dev, "got dead MR\n");
                        ret = -EFAULT;
                        goto srcu_unlock;
                }

                if (!mr->umem->is_odp) {
                        mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
                                    key);
                        if (bytes_mapped)
                                *bytes_mapped += bcnt;
                        ret = 0;
                        goto srcu_unlock;
                }

                ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
                if (ret < 0)
                        goto srcu_unlock;

                npages += ret;
                ret = 0;
                break;

        case MLX5_MKEY_MW:
                mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);

                if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
                        mlx5_ib_dbg(dev, "indirection level exceeded\n");
                        ret = -EFAULT;
                        goto srcu_unlock;
                }

                outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
                        sizeof(*pklm) * (mw->ndescs - 2);

                if (outlen > cur_outlen) {
                        kfree(out);
                        out = kzalloc(outlen, GFP_KERNEL);
                        if (!out) {
                                ret = -ENOMEM;
                                goto srcu_unlock;
                        }
                        cur_outlen = outlen;
                }

                pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
                                                       bsf0_klm0_pas_mtt0_1);

                ret = mlx5_core_query_mkey(dev->mdev, &mw->mmkey, out, outlen);
                if (ret)
                        goto srcu_unlock;

                offset = io_virt - MLX5_GET64(query_mkey_out, out,
                                              memory_key_mkey_entry.start_addr);

                for (i = 0; bcnt && i < mw->ndescs; i++, pklm++) {
                        if (offset >= be32_to_cpu(pklm->bcount)) {
                                offset -= be32_to_cpu(pklm->bcount);
                                continue;
                        }

                        frame = kzalloc(sizeof(*frame), GFP_KERNEL);
                        if (!frame) {
                                ret = -ENOMEM;
                                goto srcu_unlock;
                        }

                        frame->key = be32_to_cpu(pklm->key);
                        frame->io_virt = be64_to_cpu(pklm->va) + offset;
                        frame->bcnt = min_t(size_t, bcnt,
                                            be32_to_cpu(pklm->bcount) - offset);
                        frame->depth = depth + 1;
                        frame->next = head;
                        head = frame;

                        bcnt -= frame->bcnt;
                        offset = 0;
                }
                break;

        default:
                mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
                ret = -EFAULT;
                goto srcu_unlock;
        }

        if (head) {
                frame = head;
                head = frame->next;

                key = frame->key;
                io_virt = frame->io_virt;
                bcnt = frame->bcnt;
                depth = frame->depth;
                kfree(frame);

                goto next_mr;
        }

srcu_unlock:
        while (head) {
                frame = head;
                head = frame->next;
                kfree(frame);
        }
        kfree(out);

        srcu_read_unlock(&dev->mr_srcu, srcu_key);
        *bytes_committed = 0;
        return ret ? ret : npages;
}

/**
 * Parse a series of data segments for page fault handling.
 *
 * @qp the QP on which the fault occurred.
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               WQE).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
                                   struct mlx5_pagefault *pfault,
                                   struct mlx5_ib_qp *qp, void *wqe,
                                   void *wqe_end, u32 *bytes_mapped,
                                   u32 *total_wqe_bytes, int receive_queue)
{
        int ret = 0, npages = 0;
        u64 io_virt;
        u32 key;
        u32 byte_count;
        size_t bcnt;
        int inline_segment;

        /* Skip SRQ next-WQE segment. */
        if (receive_queue && qp->ibqp.srq)
                wqe += sizeof(struct mlx5_wqe_srq_next_seg);

        if (bytes_mapped)
                *bytes_mapped = 0;
        if (total_wqe_bytes)
                *total_wqe_bytes = 0;

        while (wqe < wqe_end) {
                struct mlx5_wqe_data_seg *dseg = wqe;

                io_virt = be64_to_cpu(dseg->addr);
                key = be32_to_cpu(dseg->lkey);
                byte_count = be32_to_cpu(dseg->byte_count);
                inline_segment = !!(byte_count & MLX5_INLINE_SEG);
                bcnt = byte_count & ~MLX5_INLINE_SEG;

                if (inline_segment) {
                        bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
                        wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
                                     16);
                } else {
                        wqe += sizeof(*dseg);
                }

                /* receive WQE end of sg list. */
                if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
                    io_virt == 0)
                        break;

                if (!inline_segment && total_wqe_bytes) {
                        *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
                                        pfault->bytes_committed);
                }

                /* A zero length data segment designates a length of 2GB. */
                if (bcnt == 0)
                        bcnt = 1U << 31;

                if (inline_segment || bcnt <= pfault->bytes_committed) {
                        pfault->bytes_committed -=
                                min_t(size_t, bcnt,
                                      pfault->bytes_committed);
                        continue;
                }

                ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
                                                    &pfault->bytes_committed,
                                                    bytes_mapped);
                if (ret < 0)
                        break;
                npages += ret;
        }

        return ret < 0 ? ret : npages;
}

static const u32 mlx5_ib_odp_opcode_cap[] = {
        [MLX5_OPCODE_SEND]             = IB_ODP_SUPPORT_SEND,
        [MLX5_OPCODE_SEND_IMM]         = IB_ODP_SUPPORT_SEND,
        [MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
        [MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
        [MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
        [MLX5_OPCODE_RDMA_READ]        = IB_ODP_SUPPORT_READ,
        [MLX5_OPCODE_ATOMIC_CS]        = IB_ODP_SUPPORT_ATOMIC,
        [MLX5_OPCODE_ATOMIC_FA]        = IB_ODP_SUPPORT_ATOMIC,
};
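
/*
 * This table is indexed by the WQE opcode taken from the control segment and
 * gives the ODP capability bit that the faulting QP's transport must
 * advertise for that opcode.  The initiator page-fault handler below checks
 * it and fails the fault with -EFAULT (so it is resumed with an error) when
 * the opcode is out of range or not covered by the transport's caps.
 */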

/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
        struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
        struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
        struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
        u16 wqe_index = pfault->wqe.wqe_index;
        u32 transport_caps;
        struct mlx5_base_av *av;
        unsigned ds, opcode;
#if defined(DEBUG)
        u32 ctrl_wqe_index, ctrl_qpn;
#endif
        u32 qpn = qp->trans_qp.base.mqp.qpn;

        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
        if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
                mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
                            ds, wqe_length);
                return -EFAULT;
        }

        if (ds == 0) {
                mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
                            wqe_index, qpn);
                return -EFAULT;
        }

#if defined(DEBUG)
        ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
                          MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
                         MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
        if (wqe_index != ctrl_wqe_index) {
                mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
                            wqe_index, qpn,
                            ctrl_wqe_index);
                return -EFAULT;
        }

        ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
                MLX5_WQE_CTRL_QPN_SHIFT;
        if (qpn != ctrl_qpn) {
                mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
                            wqe_index, qpn,
                            ctrl_qpn);
                return -EFAULT;
        }
#endif /* DEBUG */

        *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
        *wqe += sizeof(*ctrl);

        opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                 MLX5_WQE_CTRL_OPCODE_MASK;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
                break;
        case IB_QPT_UD:
                transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
                break;
        default:
                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
                            qp->ibqp.qp_type);
                return -EFAULT;
        }

        if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
                     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
                mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
                            opcode);
                return -EFAULT;
        }

        if (qp->ibqp.qp_type != IB_QPT_RC) {
                av = *wqe;
                if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
                        *wqe += sizeof(struct mlx5_av);
                else
                        *wqe += sizeof(struct mlx5_base_av);
        }

        switch (opcode) {
        case MLX5_OPCODE_RDMA_WRITE:
        case MLX5_OPCODE_RDMA_WRITE_IMM:
        case MLX5_OPCODE_RDMA_READ:
                *wqe += sizeof(struct mlx5_wqe_raddr_seg);
                break;
        case MLX5_OPCODE_ATOMIC_CS:
        case MLX5_OPCODE_ATOMIC_FA:
                *wqe += sizeof(struct mlx5_wqe_raddr_seg);
                *wqe += sizeof(struct mlx5_wqe_atomic_seg);
                break;
        }

        return 0;
}

/*
 * Parse responder WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler(
        struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
        struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
        struct mlx5_ib_wq *wq = &qp->rq;
        int wqe_size = 1 << wq->wqe_shift;

        if (qp->ibqp.srq) {
                mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
                return -EFAULT;
        }

        if (qp->wq_sig) {
                mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
                return -EFAULT;
        }

        if (wqe_size > wqe_length) {
                mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
                return -EFAULT;
        }

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                      IB_ODP_SUPPORT_RECV))
                        goto invalid_transport_or_opcode;
                break;
        default:
invalid_transport_or_opcode:
                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
                            qp->ibqp.qp_type);
                return -EFAULT;
        }

        *wqe_end = *wqe + wqe_size;

        return 0;
}

static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
                                              u32 wq_num)
{
        struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);

        if (!mqp) {
                mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
                return NULL;
        }

        return to_mibqp(mqp);
}

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
                                          struct mlx5_pagefault *pfault)
{
        int ret;
        void *wqe, *wqe_end;
        u32 bytes_mapped, total_wqe_bytes;
        char *buffer = NULL;
        int resume_with_error = 1;
        u16 wqe_index = pfault->wqe.wqe_index;
        int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
        struct mlx5_ib_qp *qp;

        buffer = (char *)__get_free_page(GFP_KERNEL);
        if (!buffer) {
                mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
                goto resolve_page_fault;
        }

        qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
        if (!qp)
                goto resolve_page_fault;

        ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
                                    PAGE_SIZE, &qp->trans_qp.base);
        if (ret < 0) {
                mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
                            ret, wqe_index, pfault->token);
                goto resolve_page_fault;
        }

        wqe = buffer;
        if (requestor)
                ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
                                                          &wqe_end, ret);
        else
                ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
                                                          &wqe_end, ret);
        if (ret < 0)
                goto resolve_page_fault;

        if (wqe >= wqe_end) {
                mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
                goto resolve_page_fault;
        }

        ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
                                      &bytes_mapped, &total_wqe_bytes,
                                      !requestor);
        if (ret == -EAGAIN) {
                resume_with_error = 0;
                goto resolve_page_fault;
        } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
                goto resolve_page_fault;
        }

        resume_with_error = 0;
resolve_page_fault:
        mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
        mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
                    pfault->wqe.wq_num, resume_with_error,
                    pfault->type);
        free_page((unsigned long)buffer);
}

static int pages_in_range(u64 address, u32 length)
{
        return (ALIGN(address + length, PAGE_SIZE) -
                (address & PAGE_MASK)) >> PAGE_SHIFT;
}
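
/*
 * pages_in_range() counts the pages touched by [address, address + length).
 * For example, with 4 KiB pages, address = 0x1ff0 and length = 0x20 span two
 * pages: ALIGN(0x2010, 0x1000) = 0x3000, 0x1ff0 & PAGE_MASK = 0x1000, and
 * (0x3000 - 0x1000) >> 12 = 2.
 */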

static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
                                           struct mlx5_pagefault *pfault)
{
        u64 address;
        u32 length;
        u32 prefetch_len = pfault->bytes_committed;
        int prefetch_activated = 0;
        u32 rkey = pfault->rdma.r_key;
        int ret;

        /* The RDMA responder handler handles the page fault in two parts.
         * First it brings the necessary pages for the current packet
         * (and uses the pfault context), and then (after resuming the QP)
         * prefetches more pages. The second operation cannot use the pfault
         * context and therefore uses the dummy_pfault context allocated on
         * the stack */
        pfault->rdma.rdma_va += pfault->bytes_committed;
        pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
                                        pfault->rdma.rdma_op_len);
        pfault->bytes_committed = 0;

        address = pfault->rdma.rdma_va;
        length  = pfault->rdma.rdma_op_len;

        /* For some operations, the hardware cannot tell the exact message
         * length, and in those cases it reports zero. Use prefetch
         * logic. */
        if (length == 0) {
                prefetch_activated = 1;
                length = pfault->rdma.packet_size;
                prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
        }

        ret = pagefault_single_data_segment(dev, rkey, address, length,
                                            &pfault->bytes_committed, NULL);
        if (ret == -EAGAIN) {
                /* We're racing with an invalidation, don't prefetch */
                prefetch_activated = 0;
        } else if (ret < 0 || pages_in_range(address, length) > ret) {
                mlx5_ib_page_fault_resume(dev, pfault, 1);
                if (ret != -ENOENT)
                        mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
                                    ret, pfault->token, pfault->type);
                return;
        }

        mlx5_ib_page_fault_resume(dev, pfault, 0);
        mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
                    pfault->token, pfault->type,
                    prefetch_activated);

        /* At this point, there might be a new pagefault already arriving in
         * the eq, switch to the dummy pagefault for the rest of the
         * processing. We're still OK with the objects being alive as the
         * work-queue is being fenced. */

        if (prefetch_activated) {
                u32 bytes_committed = 0;

                ret = pagefault_single_data_segment(dev, rkey, address,
                                                    prefetch_len,
                                                    &bytes_committed, NULL);
                if (ret < 0 && ret != -EAGAIN) {
                        mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
                                    ret, pfault->token, address, prefetch_len);
                }
        }
}

void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
                    struct mlx5_pagefault *pfault)
{
        struct mlx5_ib_dev *dev = context;
        u8 event_subtype = pfault->event_subtype;

        switch (event_subtype) {
        case MLX5_PFAULT_SUBTYPE_WQE:
                mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
                break;
        case MLX5_PFAULT_SUBTYPE_RDMA:
                mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
                break;
        default:
                mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
                            event_subtype);
                mlx5_ib_page_fault_resume(dev, pfault, 1);
        }
}

void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
{
        if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
                return;

        switch (ent->order - 2) {
        case MLX5_IMR_MTT_CACHE_ENTRY:
                ent->page = PAGE_SHIFT;
                ent->xlt = MLX5_IMR_MTT_ENTRIES *
                           sizeof(struct mlx5_mtt) /
                           MLX5_IB_UMR_OCTOWORD;
                ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
                ent->limit = 0;
                break;

        case MLX5_IMR_KSM_CACHE_ENTRY:
                ent->page = MLX5_KSM_PAGE_SHIFT;
                ent->xlt = mlx5_imr_ksm_entries *
                           sizeof(struct mlx5_klm) /
                           MLX5_IB_UMR_OCTOWORD;
                ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
                ent->limit = 0;
                break;
        }
}

int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
        int ret;

        if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
                ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
                if (ret) {
                        mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
                        return ret;
                }
        }

        return 0;
}
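
/*
 * mlx5_imr_ksm_entries below is sized so that one KSM entry exists for every
 * MLX5_IMR_MTT_SIZE slot of the user address space: get_order(TASK_SIZE) is
 * roughly log2(TASK_SIZE) - PAGE_SHIFT, and subtracting MLX5_IMR_MTT_BITS
 * leaves log2(TASK_SIZE / MLX5_IMR_MTT_SIZE).  As a rough example, with a
 * 47-bit user address space and 4 KiB pages that is 2^(47 - 30) = 128K KSM
 * entries for the parent mkey.
 */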

int mlx5_ib_odp_init(void)
{
        mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
                                       MLX5_IMR_MTT_BITS);

        return 0;
}