]>
Commit | Line | Data |
---|---|---|
e126ba97 | 1 | /* |
302bdf68 | 2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
e126ba97 EC |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
adec640e | 33 | #include <linux/highmem.h> |
e126ba97 EC |
34 | #include <linux/kernel.h> |
35 | #include <linux/module.h> | |
fc50db98 | 36 | #include <linux/delay.h> |
e126ba97 EC |
37 | #include <linux/mlx5/driver.h> |
38 | #include <linux/mlx5/cmd.h> | |
39 | #include "mlx5_core.h" | |
40 | ||
/* op_mod values for the MANAGE_PAGES command. */
enum {
	MLX5_PAGES_CANT_GIVE	= 0,	/* driver cannot supply pages */
	MLX5_PAGES_GIVE		= 1,	/* driver gives pages to FW */
	MLX5_PAGES_TAKE		= 2	/* driver reclaims pages from FW */
};
46 | ||
47 | struct mlx5_pages_req { | |
48 | struct mlx5_core_dev *dev; | |
f241e749 | 49 | u16 func_id; |
0a324f31 | 50 | s32 npages; |
e126ba97 EC |
51 | struct work_struct work; |
52 | }; | |
53 | ||
54 | struct fw_page { | |
bf0bf77f EC |
55 | struct rb_node rb_node; |
56 | u64 addr; | |
57 | struct page *page; | |
58 | u16 func_id; | |
59 | unsigned long bitmask; | |
60 | struct list_head list; | |
61 | unsigned free_count; | |
e126ba97 EC |
62 | }; |
63 | ||
/* Reclaim timeouts (milliseconds). */
enum {
	MAX_RECLAIM_TIME_MSECS		= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};
68 | ||
bf0bf77f EC |
69 | enum { |
70 | MLX5_MAX_RECLAIM_TIME_MILI = 5000, | |
05bdb2ab | 71 | MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, |
bf0bf77f EC |
72 | }; |
73 | ||
e126ba97 EC |
74 | static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) |
75 | { | |
76 | struct rb_root *root = &dev->priv.page_root; | |
77 | struct rb_node **new = &root->rb_node; | |
78 | struct rb_node *parent = NULL; | |
79 | struct fw_page *nfp; | |
80 | struct fw_page *tfp; | |
bf0bf77f | 81 | int i; |
e126ba97 EC |
82 | |
83 | while (*new) { | |
84 | parent = *new; | |
85 | tfp = rb_entry(parent, struct fw_page, rb_node); | |
86 | if (tfp->addr < addr) | |
87 | new = &parent->rb_left; | |
88 | else if (tfp->addr > addr) | |
89 | new = &parent->rb_right; | |
90 | else | |
91 | return -EEXIST; | |
92 | } | |
93 | ||
bf0bf77f | 94 | nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); |
e126ba97 EC |
95 | if (!nfp) |
96 | return -ENOMEM; | |
97 | ||
98 | nfp->addr = addr; | |
99 | nfp->page = page; | |
100 | nfp->func_id = func_id; | |
bf0bf77f EC |
101 | nfp->free_count = MLX5_NUM_4K_IN_PAGE; |
102 | for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) | |
103 | set_bit(i, &nfp->bitmask); | |
e126ba97 EC |
104 | |
105 | rb_link_node(&nfp->rb_node, parent, new); | |
106 | rb_insert_color(&nfp->rb_node, root); | |
bf0bf77f | 107 | list_add(&nfp->list, &dev->priv.free_list); |
e126ba97 EC |
108 | |
109 | return 0; | |
110 | } | |
111 | ||
bf0bf77f | 112 | static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) |
e126ba97 EC |
113 | { |
114 | struct rb_root *root = &dev->priv.page_root; | |
115 | struct rb_node *tmp = root->rb_node; | |
bf0bf77f | 116 | struct fw_page *result = NULL; |
e126ba97 EC |
117 | struct fw_page *tfp; |
118 | ||
119 | while (tmp) { | |
120 | tfp = rb_entry(tmp, struct fw_page, rb_node); | |
121 | if (tfp->addr < addr) { | |
122 | tmp = tmp->rb_left; | |
123 | } else if (tfp->addr > addr) { | |
124 | tmp = tmp->rb_right; | |
125 | } else { | |
bf0bf77f | 126 | result = tfp; |
e126ba97 EC |
127 | break; |
128 | } | |
129 | } | |
130 | ||
131 | return result; | |
132 | } | |
133 | ||
134 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |
0a324f31 | 135 | s32 *npages, int boot) |
e126ba97 | 136 | { |
a533ed5e SM |
137 | u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0}; |
138 | u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0}; | |
e126ba97 EC |
139 | int err; |
140 | ||
a533ed5e SM |
141 | MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES); |
142 | MLX5_SET(query_pages_in, in, op_mod, boot ? | |
143 | MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES : | |
144 | MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES); | |
0a324f31 | 145 | |
a533ed5e | 146 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
e126ba97 EC |
147 | if (err) |
148 | return err; | |
149 | ||
a533ed5e SM |
150 | *npages = MLX5_GET(query_pages_out, out, num_pages); |
151 | *func_id = MLX5_GET(query_pages_out, out, function_id); | |
e126ba97 EC |
152 | |
153 | return err; | |
154 | } | |
155 | ||
bf0bf77f EC |
156 | static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) |
157 | { | |
158 | struct fw_page *fp; | |
159 | unsigned n; | |
160 | ||
24e42754 | 161 | if (list_empty(&dev->priv.free_list)) |
bf0bf77f | 162 | return -ENOMEM; |
bf0bf77f EC |
163 | |
164 | fp = list_entry(dev->priv.free_list.next, struct fw_page, list); | |
165 | n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); | |
166 | if (n >= MLX5_NUM_4K_IN_PAGE) { | |
167 | mlx5_core_warn(dev, "alloc 4k bug\n"); | |
168 | return -ENOENT; | |
169 | } | |
170 | clear_bit(n, &fp->bitmask); | |
171 | fp->free_count--; | |
172 | if (!fp->free_count) | |
173 | list_del(&fp->list); | |
174 | ||
05bdb2ab | 175 | *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE; |
bf0bf77f EC |
176 | |
177 | return 0; | |
178 | } | |
179 | ||
59d2d18c HL |
180 | #define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT) |
181 | ||
bf0bf77f EC |
182 | static void free_4k(struct mlx5_core_dev *dev, u64 addr) |
183 | { | |
184 | struct fw_page *fwp; | |
185 | int n; | |
186 | ||
59d2d18c | 187 | fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); |
bf0bf77f EC |
188 | if (!fwp) { |
189 | mlx5_core_warn(dev, "page not found\n"); | |
190 | return; | |
191 | } | |
192 | ||
59d2d18c | 193 | n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; |
bf0bf77f EC |
194 | fwp->free_count++; |
195 | set_bit(n, &fwp->bitmask); | |
196 | if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { | |
197 | rb_erase(&fwp->rb_node, &dev->priv.page_root); | |
2b136d02 EC |
198 | if (fwp->free_count != 1) |
199 | list_del(&fwp->list); | |
59d2d18c HL |
200 | dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, |
201 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
bf0bf77f EC |
202 | __free_page(fwp->page); |
203 | kfree(fwp); | |
204 | } else if (fwp->free_count == 1) { | |
205 | list_add(&fwp->list, &dev->priv.free_list); | |
206 | } | |
207 | } | |
208 | ||
209 | static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) | |
210 | { | |
211 | struct page *page; | |
6b276190 | 212 | u64 zero_addr = 1; |
bf0bf77f EC |
213 | u64 addr; |
214 | int err; | |
ad189106 | 215 | int nid = dev_to_node(&dev->pdev->dev); |
bf0bf77f | 216 | |
ad189106 | 217 | page = alloc_pages_node(nid, GFP_HIGHUSER, 0); |
bf0bf77f EC |
218 | if (!page) { |
219 | mlx5_core_warn(dev, "failed to allocate page\n"); | |
220 | return -ENOMEM; | |
221 | } | |
6b276190 | 222 | map: |
bf0bf77f EC |
223 | addr = dma_map_page(&dev->pdev->dev, page, 0, |
224 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
225 | if (dma_mapping_error(&dev->pdev->dev, addr)) { | |
226 | mlx5_core_warn(dev, "failed dma mapping page\n"); | |
227 | err = -ENOMEM; | |
6b276190 | 228 | goto err_mapping; |
bf0bf77f | 229 | } |
6b276190 NO |
230 | |
231 | /* Firmware doesn't support page with physical address 0 */ | |
232 | if (addr == 0) { | |
233 | zero_addr = addr; | |
234 | goto map; | |
235 | } | |
236 | ||
bf0bf77f EC |
237 | err = insert_page(dev, addr, page, func_id); |
238 | if (err) { | |
239 | mlx5_core_err(dev, "failed to track allocated page\n"); | |
6b276190 NO |
240 | dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, |
241 | DMA_BIDIRECTIONAL); | |
bf0bf77f EC |
242 | } |
243 | ||
6b276190 NO |
244 | err_mapping: |
245 | if (err) | |
246 | __free_page(page); | |
bf0bf77f | 247 | |
6b276190 NO |
248 | if (zero_addr == 0) |
249 | dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE, | |
250 | DMA_BIDIRECTIONAL); | |
bf0bf77f EC |
251 | |
252 | return err; | |
253 | } | |
a8ffe63e EC |
254 | |
255 | static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) | |
256 | { | |
a533ed5e SM |
257 | u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; |
258 | u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; | |
a8ffe63e EC |
259 | int err; |
260 | ||
a533ed5e SM |
261 | MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); |
262 | MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE); | |
263 | MLX5_SET(manage_pages_in, in, function_id, func_id); | |
c4f287c4 | 264 | |
a533ed5e | 265 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
a8ffe63e | 266 | if (err) |
a533ed5e SM |
267 | mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n", |
268 | func_id, err); | |
a8ffe63e EC |
269 | } |
270 | ||
e126ba97 EC |
271 | static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, |
272 | int notify_fail) | |
273 | { | |
a533ed5e SM |
274 | u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; |
275 | int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); | |
e126ba97 EC |
276 | u64 addr; |
277 | int err; | |
a533ed5e | 278 | u32 *in; |
e126ba97 EC |
279 | int i; |
280 | ||
a533ed5e | 281 | inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); |
1b9a07ee | 282 | in = kvzalloc(inlen, GFP_KERNEL); |
e126ba97 | 283 | if (!in) { |
a8ffe63e | 284 | err = -ENOMEM; |
e126ba97 | 285 | mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); |
a8ffe63e | 286 | goto out_free; |
e126ba97 | 287 | } |
e126ba97 EC |
288 | |
289 | for (i = 0; i < npages; i++) { | |
bf0bf77f EC |
290 | retry: |
291 | err = alloc_4k(dev, &addr); | |
e126ba97 | 292 | if (err) { |
bf0bf77f EC |
293 | if (err == -ENOMEM) |
294 | err = alloc_system_page(dev, func_id); | |
295 | if (err) | |
296 | goto out_4k; | |
297 | ||
298 | goto retry; | |
e126ba97 | 299 | } |
b8a4ddb2 | 300 | MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr); |
e126ba97 EC |
301 | } |
302 | ||
a533ed5e SM |
303 | MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); |
304 | MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE); | |
305 | MLX5_SET(manage_pages_in, in, function_id, func_id); | |
306 | MLX5_SET(manage_pages_in, in, input_num_entries, npages); | |
307 | ||
308 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); | |
e126ba97 | 309 | if (err) { |
1a91de28 JP |
310 | mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", |
311 | func_id, npages, err); | |
a8ffe63e | 312 | goto out_4k; |
e126ba97 | 313 | } |
e126ba97 | 314 | |
fc50db98 EC |
315 | dev->priv.fw_pages += npages; |
316 | if (func_id) | |
317 | dev->priv.vfs_pages += npages; | |
318 | ||
e126ba97 EC |
319 | mlx5_core_dbg(dev, "err %d\n", err); |
320 | ||
a8ffe63e EC |
321 | kvfree(in); |
322 | return 0; | |
952f5f6e | 323 | |
bf0bf77f EC |
324 | out_4k: |
325 | for (i--; i >= 0; i--) | |
a533ed5e | 326 | free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i])); |
e126ba97 | 327 | out_free: |
479163f4 | 328 | kvfree(in); |
a8ffe63e EC |
329 | if (notify_fail) |
330 | page_notify_fail(dev, func_id); | |
e126ba97 EC |
331 | return err; |
332 | } | |
333 | ||
5adff6a0 | 334 | static int reclaim_pages_cmd(struct mlx5_core_dev *dev, |
a533ed5e | 335 | u32 *in, int in_size, u32 *out, int out_size) |
5adff6a0 DJ |
336 | { |
337 | struct fw_page *fwp; | |
338 | struct rb_node *p; | |
d62292e8 | 339 | u32 func_id; |
5adff6a0 DJ |
340 | u32 npages; |
341 | u32 i = 0; | |
342 | ||
c4f287c4 SM |
343 | if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) |
344 | return mlx5_cmd_exec(dev, in, in_size, out, out_size); | |
a533ed5e SM |
345 | |
346 | /* No hard feelings, we want our pages back! */ | |
347 | npages = MLX5_GET(manage_pages_in, in, input_num_entries); | |
d62292e8 | 348 | func_id = MLX5_GET(manage_pages_in, in, function_id); |
5adff6a0 DJ |
349 | |
350 | p = rb_first(&dev->priv.page_root); | |
351 | while (p && i < npages) { | |
352 | fwp = rb_entry(p, struct fw_page, rb_node); | |
5adff6a0 | 353 | p = rb_next(p); |
d62292e8 MHY |
354 | if (fwp->func_id != func_id) |
355 | continue; | |
356 | ||
b8a4ddb2 | 357 | MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); |
5adff6a0 DJ |
358 | i++; |
359 | } | |
360 | ||
a533ed5e | 361 | MLX5_SET(manage_pages_out, out, output_num_entries, i); |
5adff6a0 DJ |
362 | return 0; |
363 | } | |
364 | ||
e126ba97 EC |
365 | static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, |
366 | int *nclaimed) | |
367 | { | |
a533ed5e SM |
368 | int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); |
369 | u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; | |
e126ba97 | 370 | int num_claimed; |
a533ed5e | 371 | u32 *out; |
e126ba97 EC |
372 | int err; |
373 | int i; | |
374 | ||
dabed0e6 EC |
375 | if (nclaimed) |
376 | *nclaimed = 0; | |
377 | ||
a533ed5e | 378 | outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); |
1b9a07ee | 379 | out = kvzalloc(outlen, GFP_KERNEL); |
e126ba97 EC |
380 | if (!out) |
381 | return -ENOMEM; | |
382 | ||
a533ed5e SM |
383 | MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); |
384 | MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE); | |
385 | MLX5_SET(manage_pages_in, in, function_id, func_id); | |
386 | MLX5_SET(manage_pages_in, in, input_num_entries, npages); | |
387 | ||
e126ba97 | 388 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); |
a533ed5e | 389 | err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); |
e126ba97 | 390 | if (err) { |
5adff6a0 | 391 | mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); |
e126ba97 EC |
392 | goto out_free; |
393 | } | |
394 | ||
a533ed5e | 395 | num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries); |
fc50db98 EC |
396 | if (num_claimed > npages) { |
397 | mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n", | |
398 | num_claimed, npages); | |
399 | err = -EINVAL; | |
400 | goto out_free; | |
401 | } | |
e126ba97 | 402 | |
a533ed5e SM |
403 | for (i = 0; i < num_claimed; i++) |
404 | free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i])); | |
405 | ||
5adff6a0 DJ |
406 | if (nclaimed) |
407 | *nclaimed = num_claimed; | |
408 | ||
fc50db98 EC |
409 | dev->priv.fw_pages -= num_claimed; |
410 | if (func_id) | |
411 | dev->priv.vfs_pages -= num_claimed; | |
e126ba97 EC |
412 | |
413 | out_free: | |
479163f4 | 414 | kvfree(out); |
e126ba97 EC |
415 | return err; |
416 | } | |
417 | ||
418 | static void pages_work_handler(struct work_struct *work) | |
419 | { | |
420 | struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); | |
421 | struct mlx5_core_dev *dev = req->dev; | |
422 | int err = 0; | |
423 | ||
424 | if (req->npages < 0) | |
425 | err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); | |
426 | else if (req->npages > 0) | |
427 | err = give_pages(dev, req->func_id, req->npages, 1); | |
428 | ||
429 | if (err) | |
1a91de28 JP |
430 | mlx5_core_warn(dev, "%s fail %d\n", |
431 | req->npages < 0 ? "reclaim" : "give", err); | |
e126ba97 EC |
432 | |
433 | kfree(req); | |
434 | } | |
435 | ||
436 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | |
0a324f31 | 437 | s32 npages) |
e126ba97 EC |
438 | { |
439 | struct mlx5_pages_req *req; | |
440 | ||
441 | req = kzalloc(sizeof(*req), GFP_ATOMIC); | |
442 | if (!req) { | |
443 | mlx5_core_warn(dev, "failed to allocate pages request\n"); | |
444 | return; | |
445 | } | |
446 | ||
447 | req->dev = dev; | |
448 | req->func_id = func_id; | |
449 | req->npages = npages; | |
450 | INIT_WORK(&req->work, pages_work_handler); | |
451 | queue_work(dev->priv.pg_wq, &req->work); | |
452 | } | |
453 | ||
cd23b14b | 454 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) |
e126ba97 | 455 | { |
e126ba97 | 456 | u16 uninitialized_var(func_id); |
0a324f31 | 457 | s32 uninitialized_var(npages); |
e126ba97 EC |
458 | int err; |
459 | ||
0a324f31 | 460 | err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); |
e126ba97 EC |
461 | if (err) |
462 | return err; | |
463 | ||
0a324f31 ML |
464 | mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", |
465 | npages, boot ? "boot" : "init", func_id); | |
e126ba97 | 466 | |
0a324f31 | 467 | return give_pages(dev, func_id, npages, 0); |
e126ba97 EC |
468 | } |
469 | ||
4e3d677b ML |
470 | enum { |
471 | MLX5_BLKS_FOR_RECLAIM_PAGES = 12 | |
472 | }; | |
473 | ||
e126ba97 EC |
474 | static int optimal_reclaimed_pages(void) |
475 | { | |
476 | struct mlx5_cmd_prot_block *block; | |
477 | struct mlx5_cmd_layout *lay; | |
478 | int ret; | |
479 | ||
4e3d677b | 480 | ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - |
a533ed5e SM |
481 | MLX5_ST_SZ_BYTES(manage_pages_out)) / |
482 | MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); | |
e126ba97 EC |
483 | |
484 | return ret; | |
485 | } | |
486 | ||
487 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) | |
488 | { | |
dabed0e6 | 489 | unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); |
e126ba97 EC |
490 | struct fw_page *fwp; |
491 | struct rb_node *p; | |
dabed0e6 | 492 | int nclaimed = 0; |
89d44f0a | 493 | int err = 0; |
e126ba97 EC |
494 | |
495 | do { | |
496 | p = rb_first(&dev->priv.page_root); | |
497 | if (p) { | |
498 | fwp = rb_entry(p, struct fw_page, rb_node); | |
5adff6a0 DJ |
499 | err = reclaim_pages(dev, fwp->func_id, |
500 | optimal_reclaimed_pages(), | |
501 | &nclaimed); | |
502 | ||
e126ba97 | 503 | if (err) { |
1a91de28 JP |
504 | mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", |
505 | err); | |
e126ba97 EC |
506 | return err; |
507 | } | |
dabed0e6 EC |
508 | if (nclaimed) |
509 | end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); | |
e126ba97 EC |
510 | } |
511 | if (time_after(jiffies, end)) { | |
512 | mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); | |
513 | break; | |
514 | } | |
515 | } while (p); | |
516 | ||
5adff6a0 DJ |
517 | WARN(dev->priv.fw_pages, |
518 | "FW pages counter is %d after reclaiming all pages\n", | |
519 | dev->priv.fw_pages); | |
520 | WARN(dev->priv.vfs_pages, | |
521 | "VFs FW pages counter is %d after reclaiming all pages\n", | |
522 | dev->priv.vfs_pages); | |
523 | ||
e126ba97 EC |
524 | return 0; |
525 | } | |
526 | ||
527 | void mlx5_pagealloc_init(struct mlx5_core_dev *dev) | |
528 | { | |
529 | dev->priv.page_root = RB_ROOT; | |
bf0bf77f | 530 | INIT_LIST_HEAD(&dev->priv.free_list); |
e126ba97 EC |
531 | } |
532 | ||
/* Counterpart of mlx5_pagealloc_init(); currently nothing to tear
 * down — pages are reclaimed via mlx5_reclaim_startup_pages(). */
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}
537 | ||
538 | int mlx5_pagealloc_start(struct mlx5_core_dev *dev) | |
539 | { | |
540 | dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); | |
541 | if (!dev->priv.pg_wq) | |
542 | return -ENOMEM; | |
543 | ||
544 | return 0; | |
545 | } | |
546 | ||
547 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) | |
548 | { | |
549 | destroy_workqueue(dev->priv.pg_wq); | |
550 | } | |
fc50db98 EC |
551 | |
552 | int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) | |
553 | { | |
554 | unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); | |
555 | int prev_vfs_pages = dev->priv.vfs_pages; | |
556 | ||
d62292e8 MHY |
557 | /* In case of internal error we will free the pages manually later */ |
558 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { | |
559 | mlx5_core_warn(dev, "Skipping wait for vf pages stage"); | |
560 | return 0; | |
561 | } | |
562 | ||
fc50db98 EC |
563 | mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages, |
564 | dev->priv.name); | |
565 | while (dev->priv.vfs_pages) { | |
566 | if (time_after(jiffies, end)) { | |
567 | mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages); | |
568 | return -ETIMEDOUT; | |
569 | } | |
570 | if (dev->priv.vfs_pages < prev_vfs_pages) { | |
571 | end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); | |
572 | prev_vfs_pages = dev->priv.vfs_pages; | |
573 | } | |
574 | msleep(50); | |
575 | } | |
576 | ||
577 | mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name); | |
578 | return 0; | |
579 | } |