/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"
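
/* MANAGE_PAGES op_mod values: GIVE posts pages to firmware, TAKE reclaims
 * them, and CANT_GIVE reports that the driver cannot supply the pages
 * firmware asked for.
 */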
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};
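
/* A page-request work item, queued by the PAGE_REQUEST event handler and
 * serviced by pages_work_handler() on the pg_wq workqueue.
 */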
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};
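
/* One host page given to firmware, carved into MLX5_NUM_4K_IN_PAGE 4K
 * chunks; bitmask tracks which chunks are still free.
 */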
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS		= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
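
/* Track a newly mapped host page in the per-device rb-tree, keyed by DMA
 * address, and put it on the free list with all 4K chunks marked free.
 */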
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}
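
/* Ask firmware how many pages it wants for the given stage (boot or init)
 * and which function the pages are for.
 */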
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}
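
/* Hand out one free 4K chunk owned by func_id from the free list; -ENOMEM
 * means no suitable page is available and the caller must allocate a new
 * host page first.
 */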
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->func_id != func_id)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	rb_erase(&fwp->rb_node, &dev->priv.page_root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}
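
/* Return one 4K chunk to its fw_page: free the whole host page once all of
 * its chunks are back, or put the fw_page on the free list when its first
 * chunk is returned.
 */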
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}
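
/* Allocate and DMA-map one host page for func_id and start tracking it;
 * remap once if the mapping lands at address 0, which firmware cannot use.
 */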
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct device *device = dev->device;
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}
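
/* Notify firmware, via MLX5_PAGES_CANT_GIVE, that the driver cannot supply
 * the pages it requested for func_id.
 */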
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}
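
/* Allocate npages 4K chunks for func_id and post them to firmware with
 * MANAGE_PAGES(GIVE); on failure, free the chunks again and optionally tell
 * firmware the request cannot be satisfied.
 */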
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail, bool ec_function)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, func_id);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}
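
/* Free every page tracked for func_id locally, without returning them one
 * by one; used when firmware sets the release-all flag in a page request.
 */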
static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
			      bool ec_function)
{
	struct rb_node *p;
	int npages = 0;

	p = rb_first(&dev->priv.page_root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	dev->priv.fw_pages -= npages;
	if (func_id)
		dev->priv.vfs_pages -= npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}
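
/* Execute MANAGE_PAGES(TAKE), unless the device is in internal error:
 * firmware cannot answer then, so synthesize a successful reply by
 * collecting the requested pages from the rb-tree ourselves.
 */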
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}
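
/* Reclaim up to npages 4K chunks for func_id from firmware, return them to
 * the local allocator, and update the page accounting.
 */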
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed, bool ec_function)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}
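
/* Flags multiplexed into the ec_function field of the page request EQE. */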
enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};
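
/* PAGE_REQUEST event notifier: decode the EQE and defer the actual page
 * work to the pg_wq workqueue, since this handler runs in atomic context.
 */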
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev = container_of(priv, struct mlx5_core_dev, priv);
	eqe = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}
529 | ||
cd23b14b | 530 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) |
e126ba97 | 531 | { |
3f649ab7 KC |
532 | u16 func_id; |
533 | s32 npages; | |
e126ba97 EC |
534 | int err; |
535 | ||
0a324f31 | 536 | err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); |
e126ba97 EC |
537 | if (err) |
538 | return err; | |
539 | ||
0a324f31 ML |
540 | mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", |
541 | npages, boot ? "boot" : "init", func_id); | |
e126ba97 | 542 | |
591905ba | 543 | return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev)); |
e126ba97 EC |
544 | } |
545 | ||
4e3d677b ML |
546 | enum { |
547 | MLX5_BLKS_FOR_RECLAIM_PAGES = 12 | |
548 | }; | |
549 | ||
e126ba97 EC |
550 | static int optimal_reclaimed_pages(void) |
551 | { | |
552 | struct mlx5_cmd_prot_block *block; | |
553 | struct mlx5_cmd_layout *lay; | |
554 | int ret; | |
555 | ||
4e3d677b | 556 | ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - |
a533ed5e SM |
557 | MLX5_ST_SZ_BYTES(manage_pages_out)) / |
558 | MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); | |
e126ba97 EC |
559 | |
560 | return ret; | |
561 | } | |
562 | ||
563 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) | |
564 | { | |
dabed0e6 | 565 | unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); |
e126ba97 EC |
566 | struct fw_page *fwp; |
567 | struct rb_node *p; | |
dabed0e6 | 568 | int nclaimed = 0; |
89d44f0a | 569 | int err = 0; |
e126ba97 EC |
570 | |
571 | do { | |
572 | p = rb_first(&dev->priv.page_root); | |
573 | if (p) { | |
574 | fwp = rb_entry(p, struct fw_page, rb_node); | |
5adff6a0 DJ |
575 | err = reclaim_pages(dev, fwp->func_id, |
576 | optimal_reclaimed_pages(), | |
591905ba | 577 | &nclaimed, mlx5_core_is_ecpf(dev)); |
5adff6a0 | 578 | |
e126ba97 | 579 | if (err) { |
1a91de28 JP |
580 | mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", |
581 | err); | |
e126ba97 EC |
582 | return err; |
583 | } | |
dabed0e6 EC |
584 | if (nclaimed) |
585 | end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); | |
e126ba97 EC |
586 | } |
587 | if (time_after(jiffies, end)) { | |
588 | mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); | |
589 | break; | |
590 | } | |
591 | } while (p); | |
592 | ||
5adff6a0 DJ |
593 | WARN(dev->priv.fw_pages, |
594 | "FW pages counter is %d after reclaiming all pages\n", | |
595 | dev->priv.fw_pages); | |
596 | WARN(dev->priv.vfs_pages, | |
597 | "VFs FW pages counter is %d after reclaiming all pages\n", | |
598 | dev->priv.vfs_pages); | |
591905ba BW |
599 | WARN(dev->priv.peer_pf_pages, |
600 | "Peer PF FW pages counter is %d after reclaiming all pages\n", | |
601 | dev->priv.peer_pf_pages); | |
5adff6a0 | 602 | |
e126ba97 EC |
603 | return 0; |
604 | } | |
605 | ||
0cf53c12 | 606 | int mlx5_pagealloc_init(struct mlx5_core_dev *dev) |
e126ba97 EC |
607 | { |
608 | dev->priv.page_root = RB_ROOT; | |
bf0bf77f | 609 | INIT_LIST_HEAD(&dev->priv.free_list); |
0cf53c12 SM |
610 | dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); |
611 | if (!dev->priv.pg_wq) | |
612 | return -ENOMEM; | |
613 | ||
614 | return 0; | |
e126ba97 EC |
615 | } |
616 | ||
617 | void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) | |
618 | { | |
0cf53c12 | 619 | destroy_workqueue(dev->priv.pg_wq); |
e126ba97 EC |
620 | } |
621 | ||
0cf53c12 | 622 | void mlx5_pagealloc_start(struct mlx5_core_dev *dev) |
e126ba97 | 623 | { |
0cf53c12 SM |
624 | MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST); |
625 | mlx5_eq_notifier_register(dev, &dev->priv.pg_nb); | |
e126ba97 EC |
626 | } |
627 | ||
628 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) | |
629 | { | |
0cf53c12 SM |
630 | mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb); |
631 | flush_workqueue(dev->priv.pg_wq); | |
e126ba97 | 632 | } |
fc50db98 | 633 | |
591905ba | 634 | int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) |
fc50db98 EC |
635 | { |
636 | unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); | |
591905ba | 637 | int prev_pages = *pages; |
fc50db98 | 638 | |
d62292e8 MHY |
639 | /* In case of internal error we will free the pages manually later */ |
640 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { | |
641 | mlx5_core_warn(dev, "Skipping wait for vf pages stage"); | |
642 | return 0; | |
643 | } | |
644 | ||
27b942fb | 645 | mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages); |
591905ba | 646 | while (*pages) { |
fc50db98 | 647 | if (time_after(jiffies, end)) { |
591905ba | 648 | mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages); |
fc50db98 EC |
649 | return -ETIMEDOUT; |
650 | } | |
591905ba | 651 | if (*pages < prev_pages) { |
fc50db98 | 652 | end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); |
591905ba | 653 | prev_pages = *pages; |
fc50db98 EC |
654 | } |
655 | msleep(50); | |
656 | } | |
657 | ||
27b942fb | 658 | mlx5_core_dbg(dev, "All pages received\n"); |
fc50db98 EC |
659 | return 0; |
660 | } |