/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"
#include "rds.h"

struct workqueue_struct *rds_ib_mr_wq;

static void rds_ib_odp_mr_worker(struct work_struct *work);

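/* Look up the IB device that has @ipaddr on its ipaddr_list, under RCU.
 * On success a reference is taken on the device; the caller must drop
 * it with rds_ib_dev_put().
 */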
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof(*i_ipaddr), GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

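/* Bind the low 32 bits of @ipaddr (the IPv4-mapped part) to @rds_ibdev,
 * moving the address over from any device that previously claimed it.
 */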
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

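/* Move @conn off ib_nodev_conns and onto @rds_ibdev's conn_list, taking
 * a device reference on behalf of the connection.
 */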
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

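/* Detach @conn from @rds_ibdev, putting it back on ib_nodev_conns and
 * dropping the device reference it held.
 */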
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

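/* Destroy all connections that are not currently bound to an IB device. */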
void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			 struct rds6_info_rdma_connection *iinfo6)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo6->rdma_mr_max = pool_1m->max_items;
	iinfo6->rdma_mr_size = pool_1m->max_pages;
}
#endif

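/* Pop one MR off the pool's clean_list, if available. llist_del_first()
 * must be serialized against other consumers, hence clean_lock.
 */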
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long flags;

	spin_lock_irqsave(&pool->clean_lock, flags);
	ret = llist_del_first(&pool->clean_list);
	spin_unlock_irqrestore(&pool->clean_lock, flags);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	return ibmr;
}

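/* DMA-sync the MR's pages for CPU or device access. ODP MRs carry no
 * DMA-mapped scatterlist, so there is nothing to sync for them.
 */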
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->odp)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

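/* Unmap and unpin the MR's pages, dirtying them on release since the
 * remote side may have written to them.
 */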
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into a chain of llist nodes
 * that can be spliced back onto the pool's clean_list in one batch.
 */
static void list_to_llist_nodes(struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all) {
		unsigned long flags;

		spin_lock_irqsave(&pool->clean_lock, flags);
		llist_append_to_list(&pool->clean_list, &unmap_list);
		spin_unlock_irqrestore(&pool->clean_lock, flags);
	}

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		unsigned long flags;

		list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret) {
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
			clean_nodes = clean_nodes->next;
		}
		/* more than one entry in llist nodes */
		if (clean_nodes) {
			spin_lock_irqsave(&pool->clean_lock, flags);
			llist_add_batch(clean_nodes, clean_tail,
					&pool->clean_list);
			spin_unlock_irqrestore(&pool->clean_lock, flags);
		}
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

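/* Grab an MR for a new registration: reuse a clean one if possible,
 * otherwise reserve a slot for a fresh allocation by the caller,
 * flushing the pool (up to three attempts) when it is full.
 */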
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			break;
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return NULL;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

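/* Called when the caller is done with an MR. ODP MRs are deregistered
 * from a worker; pooled MRs go back on the pool's free or drop list and
 * may trigger a pool flush, asynchronously or, on invalidate,
 * synchronously.
 */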
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	if (ibmr->odp) {
		/* An MR created and marked as use_once. We use delayed work,
		 * because there is a chance that we are in interrupt context
		 * and can't call ib_dereg_mr() directly.
		 */
		INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
		queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
		return;
	}

	/* Return it to the pool's free list */
	rds_ib_free_frmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

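/* Flush the 8K and 1M MR pools of every IB device. */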
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

u32 rds_ib_get_lkey(void *trans_private)
{
	struct rds_ib_mr *ibmr = trans_private;

	return ibmr->u.mr->lkey;
}

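/* Main registration entry point: map @sg (or, for ODP, the virtual
 * range @start/@length) for RDMA and return a transport-private MR
 * handle. ODP registrations go straight to the device; all others are
 * served from the per-device FRMR pools.
 */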
08b48a1e | 546 | void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, |
9e630bcb | 547 | struct rds_sock *rs, u32 *key_ret, |
2eafa174 HWR |
548 | struct rds_connection *conn, |
549 | u64 start, u64 length, int need_odp) | |
08b48a1e AG |
550 | { |
551 | struct rds_ib_device *rds_ibdev; | |
552 | struct rds_ib_mr *ibmr = NULL; | |
9e630bcb | 553 | struct rds_ib_connection *ic = NULL; |
08b48a1e AG |
554 | int ret; |
555 | ||
eee2fa6a | 556 | rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]); |
08b48a1e AG |
557 | if (!rds_ibdev) { |
558 | ret = -ENODEV; | |
559 | goto out; | |
560 | } | |
561 | ||
2eafa174 HWR |
562 | if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) { |
563 | u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start; | |
564 | int access_flags = | |
565 | (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | | |
566 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC | | |
567 | IB_ACCESS_ON_DEMAND); | |
b2dfc676 | 568 | struct ib_sge sge = {}; |
2eafa174 HWR |
569 | struct ib_mr *ib_mr; |
570 | ||
571 | if (!rds_ibdev->odp_capable) { | |
572 | ret = -EOPNOTSUPP; | |
573 | goto out; | |
574 | } | |
575 | ||
576 | ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr, | |
577 | access_flags); | |
578 | ||
579 | if (IS_ERR(ib_mr)) { | |
580 | rdsdebug("rds_ib_get_user_mr returned %d\n", | |
581 | IS_ERR(ib_mr)); | |
582 | ret = PTR_ERR(ib_mr); | |
583 | goto out; | |
584 | } | |
585 | if (key_ret) | |
586 | *key_ret = ib_mr->rkey; | |
587 | ||
588 | ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL); | |
589 | if (!ibmr) { | |
590 | ib_dereg_mr(ib_mr); | |
591 | ret = -ENOMEM; | |
592 | goto out; | |
593 | } | |
594 | ibmr->u.mr = ib_mr; | |
595 | ibmr->odp = 1; | |
b2dfc676 HWR |
596 | |
597 | sge.addr = virt_addr; | |
598 | sge.length = length; | |
599 | sge.lkey = ib_mr->lkey; | |
600 | ||
601 | ib_advise_mr(rds_ibdev->pd, | |
602 | IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, | |
603 | IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1); | |
2eafa174 HWR |
604 | return ibmr; |
605 | } | |
606 | ||
9e630bcb AR |
607 | if (conn) |
608 | ic = conn->c_transport_data; | |
609 | ||
06766513 | 610 | if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) { |
08b48a1e AG |
611 | ret = -ENODEV; |
612 | goto out; | |
613 | } | |
614 | ||
07549ee2 | 615 | ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); |
9e630bcb AR |
616 | if (IS_ERR(ibmr)) { |
617 | ret = PTR_ERR(ibmr); | |
490ea596 | 618 | pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret); |
9e630bcb AR |
619 | } else { |
620 | return ibmr; | |
621 | } | |
490ea596 | 622 | |
9e630bcb | 623 | out: |
3e0249f9 ZB |
624 | if (rds_ibdev) |
625 | rds_ib_dev_put(rds_ibdev); | |
490ea596 | 626 | |
9e630bcb | 627 | return ERR_PTR(ret); |
08b48a1e | 628 | } |
6fa70da6 | 629 | |
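/* Tear down a pool: flush everything and warn if any MRs or pinned
 * pages leaked.
 */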
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

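/* Allocate and initialize an MR pool (8K or 1M) for @rds_ibdev. */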
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	spin_lock_init(&pool->clean_lock);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}

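/* Deferred deregistration of an ODP MR, queued from rds_ib_free_mr(). */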
static void rds_ib_odp_mr_worker(struct work_struct *work)
{
	struct rds_ib_mr *ibmr;

	ibmr = container_of(work, struct rds_ib_mr, work.work);
	ib_dereg_mr(ibmr->u.mr);
	kfree(ibmr);
}