/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

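/*
 * Buddy allocator for MTT segments: buddy->bits[o] is a bitmap in which a
 * set bit marks a free block of 2^o segments, and buddy->num_free[o] counts
 * those blocks.  Allocation splits a larger free block when needed; freeing
 * merges a block with its buddy whenever the buddy is also free.
 */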
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
		if (!buddy->bits[i])
			goto err_out_free;
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

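/*
 * MTT entries are handed out in segments of (1 << log_mtts_per_seg) entries,
 * so the buddy above works in units of segments.  __mlx4_alloc_mtt_range()
 * converts the requested order to a segment order, grabs a block from the
 * buddy, takes a reference on the ICM pages backing the range, and returns
 * the offset of the first MTT entry (or -1 on failure).
 */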
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

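/*
 * Set up an MTT for a buffer of npages pages.  npages == 0 means no
 * translation is needed (mtt->order = -1, used for physically contiguous or
 * reserved regions); otherwise the order is rounded up so that 2^order >=
 * npages and an MTT range of that order is reserved.
 */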
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

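/*
 * A memory key is an 8-bit rotation of the MPT table index, and
 * key_to_hw_index() undoes hw_index_to_key().  Consumers see keys, while the
 * hardware tables are indexed by the un-rotated value masked with
 * (num_mpts - 1); bits above the actual index range are free to change,
 * which FMR remapping uses to hand out a fresh key for the same entry.
 */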
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry)
{
	int err;
	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
	struct mlx4_cmd_mailbox *mailbox = NULL;

	if (mmr->enabled != MLX4_MPT_EN_HW)
		return -EINVAL;

	err = mlx4_HW2SW_MPT(dev, NULL, key);
	if (err) {
		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
		return err;
	}

	mmr->enabled = MLX4_MPT_EN_SW;

	if (!mlx4_is_mfunc(dev)) {
		**mpt_entry = mlx4_table_find(
				&mlx4_priv(dev)->mr_table.dmpt_table,
				key, NULL);
	} else {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
				   0, MLX4_CMD_QUERY_MPT,
				   MLX4_CMD_TIME_CLASS_B,
				   MLX4_CMD_WRAPPED);
		if (err)
			goto free_mailbox;

		*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
	}

	if (!(*mpt_entry) || !(**mpt_entry)) {
		err = -ENOMEM;
		goto free_mailbox;
	}

	return 0;

free_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);

int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry)
{
	int err;

	if (!mlx4_is_mfunc(dev)) {
		/* Make sure any changes to this entry are flushed */
		wmb();

		*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

		/* Make sure the new status is written */
		wmb();

		err = mlx4_SYNC_TPT(dev);
	} else {
		int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);

		(*mpt_entry)->lkey = 0;
		err = mlx4_SW2HW_MPT(dev, mailbox, key);
	}

	if (!err) {
		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
		mmr->enabled = MLX4_MPT_EN_HW;
	}
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);

void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry)
{
	if (mlx4_is_mfunc(dev)) {
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);
		mlx4_free_cmd_mailbox(dev, mailbox);
	}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn)
{
	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
	/* The wrapper function will put the slave's id here */
	if (mlx4_is_mfunc(dev))
		pd_flags &= ~MLX4_MPT_PD_VF_MASK;

	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
					  (pdn & MLX4_MPT_PD_MASK)
					  | MLX4_MPT_PD_FLAG_EN_INV);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);

int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access)
{
	u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
		    (access & MLX4_PERM_MASK);

	mpt_entry->flags = cpu_to_be32(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd      = pd;
	mr->access  = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key     = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

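/*
 * MPT index management.  The __mlx4_mpt_* helpers operate directly on the
 * driver's bitmap and ICM tables; the non-underscore wrappers detect
 * multi-function (SR-IOV) mode and route the request through the
 * ALLOC_RES/FREE_RES firmware commands instead, so the master's resource
 * tracker performs the actual allocation on the slave's behalf.
 */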
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

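/*
 * Allocate a memory region: reserve an MPT index, derive the key from it,
 * record the region parameters and reserve an MTT range large enough for
 * npages.  The MR only becomes usable once mlx4_mr_enable() writes its MPT
 * entry to the hardware.
 *
 * Typical call sequence (illustrative sketch only; error handling omitted):
 *
 *	struct mlx4_mr mr;
 *
 *	err = mlx4_mr_alloc(dev, pdn, iova, size, access, npages, page_shift, &mr);
 *	err = mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	err = mlx4_mr_enable(dev, &mr);
 *	...
 *	err = mlx4_mr_free(dev, &mr);
 */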
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mtt_cleanup(dev, &mr->mtt);
	mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
	int err;

	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
	if (err)
		return err;

	mpt_entry->start       = cpu_to_be64(iova);
	mpt_entry->length      = cpu_to_be64(size);
	mpt_entry->entity_size = cpu_to_be32(page_shift);
	mpt_entry->flags      &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
					       MLX4_MPT_FLAG_SW_OWNS));
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
		if (mr->mtt.page_shift == 0)
			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	}
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}
	mr->enabled = MLX4_MPT_EN_SW;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);

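/*
 * Make a previously allocated MR usable by hardware: take a reference on the
 * ICM backing its dMPT entry, build the MPT entry in a command mailbox and
 * hand it to firmware with SW2HW_MPT.  An MR with an MTT but page_shift == 0
 * is set up as a fast-register MR and left in the free state.
 */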
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

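/*
 * Write a run of MTT entries directly into the ICM-backed table (native
 * path).  A chunk must not cross an ICM page boundary, which is why
 * __mlx4_write_mtt() below splits the page list at page-sized steps; the
 * dma_sync calls keep the CPU and device views of the table coherent.
 */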
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

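/*
 * Memory windows share the MPT namespace with memory regions: allocation
 * only reserves an MPT index and fills in the key, PD and type.  The window
 * becomes active in mlx4_mw_enable(), which writes an MPT entry with the
 * REGION flag cleared.
 */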
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -EOPNOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key	    = hw_index_to_key(index);
	mw->pd      = pd;
	mw->type    = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key	    = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

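/*
 * Set up the MR/MTT bookkeeping for this device: the MPT index bitmap and
 * the MTT buddy (sized in segments), plus a reservation covering the MTTs
 * that firmware keeps for itself.  Slaves skip this entirely, since all MR
 * handling is proxied to the master.
 */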
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
				    (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

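/*
 * Remap an FMR to a new page list: derive a fresh key for the same MPT entry
 * (so stale keys from the previous mapping no longer match), take MPT
 * ownership back from hardware, rewrite the MTT entries and the MPT key,
 * length and start fields, then hand ownership back to hardware.
 */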
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	if (!fmr->maps)
		return;

	/* To unmap: it is sufficient to take back ownership from HW */
	*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible */
	wmb();

	fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;
	if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
		/* In case the FMR was enabled and then unmapped, make sure to
		 * give ownership of the MPT back to HW so that the HW2SW_MPT
		 * command will succeed.
		 */
		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
		/* Make sure MPT status is visible before changing MPT fields */
		wmb();
		fmr->mpt->length = 0;
		fmr->mpt->start  = 0;
		/* Make sure MPT data is visible after changing MPT status */
		wmb();
		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
		/* make sure MPT status is visible */
		wmb();
	}

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);