/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

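/* Opcode modifiers for the MANAGE_PAGES command: the driver either
 * supplies pages, reports that it cannot supply them, or asks the
 * firmware to hand pages back.
 */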
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

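/* Deferred page request from firmware; npages > 0 means the device
 * needs more pages, npages < 0 means it is releasing them.
 */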
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u32	func_id;
	s16	npages;
	struct work_struct work;
};

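/* A page given to firmware, tracked in an rb-tree keyed by its DMA
 * address so it can be unmapped and freed on reclaim.
 */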
struct fw_page {
	struct rb_node	rb_node;
	u64		addr;
	struct page	*page;
	u16		func_id;
};

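/* Wire layouts of the QUERY_PAGES and MANAGE_PAGES command mailboxes;
 * all multi-byte fields are big-endian.
 */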
struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	u8			reserved[2];
	__be16			func_id;
	__be16			init_pages;
	__be16			num_pages;
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd0;
	__be16			func_id;
	__be16			rsvd1;
	__be16			num_entries;
	u8			rsvd2[16];
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			num_entries;
	u8			rsvd1[20];
	__be64			pas[0];
};

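/* Track a firmware-owned page in the rb-tree. Note the comparison is
 * mirrored relative to the usual rbtree convention (smaller keys go
 * left of the parent's left pointer here); this is harmless because
 * remove_page() walks the tree the same way.
 */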
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page,
		       u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);

	return 0;
}

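/* Look up a tracked page by DMA address, unlink it from the rb-tree
 * and return it; returns NULL if the address is unknown.
 */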
static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			rb_erase(&tfp->rb_node, root);
			result = tfp->page;
			kfree(tfp);
			break;
		}
	}

	return result;
}

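/* Query how many pages the device wants; boot-time (init) pages are
 * reported separately from the current request.
 */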
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s16 *pages, s16 *init_pages)
{
	struct mlx5_query_pages_inbox	in;
	struct mlx5_query_pages_outbox	out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	if (pages)
		*pages = be16_to_cpu(out.num_pages);
	if (init_pages)
		*init_pages = be16_to_cpu(out.init_pages);
	*func_id = be16_to_cpu(out.func_id);

	return err;
}

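/* Allocate npages pages, DMA-map them and post them to firmware with
 * a MANAGE_PAGES(GIVE) command. On failure, optionally tell firmware
 * that no pages can be supplied, and unwind all allocations.
 */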
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct page *page;
	int inlen;
	u64 addr;
	int err;
	int i;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		return -ENOMEM;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page) {
			err = -ENOMEM;
			mlx5_core_warn(dev, "failed to allocate page\n");
			goto out_alloc;
		}
		addr = dma_map_page(&dev->pdev->dev, page, 0,
				    PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&dev->pdev->dev, addr)) {
			mlx5_core_warn(dev, "failed dma mapping page\n");
			__free_page(page);
			err = -ENOMEM;
			goto out_alloc;
		}
		err = insert_page(dev, addr, page, func_id);
		if (err) {
			mlx5_core_err(dev, "failed to track allocated page\n");
			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(page);
			err = -ENOMEM;
			goto out_alloc;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be16(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
				       func_id, npages, out.hdr.status);
			goto out_alloc;
		}
	}

	/* Only account the pages once firmware has accepted them */
	dev->priv.fw_pages += npages;

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	/* Unwind in reverse; in->pas[] still holds the addresses we mapped,
	 * so free those pages before the mailbox is reused for the notify.
	 */
	for (i--; i >= 0; i--) {
		addr = be64_to_cpu(in->pas[i]);
		page = remove_page(dev, addr);
		if (!page) {
			mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
				      addr);
			continue;
		}
		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	if (notify_fail) {
		memset(in, 0, inlen);
		memset(&out, 0, sizeof(out));
		in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
	}

out_free:
	mlx5_vfree(in);
	return err;
}

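/* Ask firmware to return up to npages pages with MANAGE_PAGES(TAKE),
 * then unmap and free every page it actually handed back.
 */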
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox	 in;
	struct mlx5_manage_pages_outbox *out;
	struct page *page;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be16(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be16_to_cpu(out->num_entries);
	/* Firmware may return fewer pages than requested; account only for
	 * what was actually reclaimed.
	 */
	dev->priv.fw_pages -= num_claimed;
	if (nclaimed)
		*nclaimed = num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		page = remove_page(dev, addr);
		if (!page) {
			mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n",
				       addr);
		} else {
			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(page);
		}
	}

out_free:
	mlx5_vfree(out);
	return err;
}

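/* Work handler for firmware page requests; a negative count means
 * firmware wants pages back.
 */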
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

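/* Called from the page-request event path, which may run in atomic
 * context; hence GFP_ATOMIC and the hand-off to a workqueue.
 */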
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s16 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}

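/* Give firmware the pages it needs to complete initialization, as
 * reported by QUERY_PAGES.
 */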
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
{
	s16 uninitialized_var(init_pages);
	u16 uninitialized_var(func_id);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n",
		      init_pages, func_id);

	return give_pages(dev, func_id, init_pages, 0);
}

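/* Upper bound on PAS entries per reclaim command: what fits in the
 * inline command layout plus one prot block, minus the fixed outbox
 * header, divided by the 8 bytes each address occupies.
 */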
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->in) + sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) / 8;

	return ret;
}

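/* Drain all pages back from firmware at teardown, giving up after
 * five seconds if firmware does not cooperate.
 */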
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(5000);
	struct fw_page *fwp;
	struct rb_node *p;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(), NULL);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
				return err;
			}
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}