net/rds/ib_rdma.c
/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;

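/*
 * Per-CPU "grace" flag for pool->clean_list. A CPU sets the busy bit
 * around llist_del_first() in rds_ib_reuse_mr(); the flush path spins
 * in wait_clean_list_grace() until every CPU's bit is clear before it
 * adds nodes back, so an entry cannot be removed and re-inserted while
 * another CPU still holds a pointer into the llist.
 */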
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

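/*
 * Look up the rds_ib_device that owns @ipaddr under RCU. On a hit the
 * device refcount is bumped; the caller must drop it again with
 * rds_ib_dev_put().
 */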
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof(*i_ipaddr), GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

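/*
 * Associate the IPv4 part of @ipaddr (an IPv4-mapped IPv6 address,
 * hence the s6_addr32[3] accesses below) with @rds_ibdev. If another
 * device currently owns the address, the mapping is moved; if
 * @rds_ibdev already owns it, this is a no-op apart from the get/put
 * pair on the old device.
 */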
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

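/*
 * Connection <-> device tracking: a connection lives either on the
 * global ib_nodev_conns list (no device bound yet) or on its device's
 * conn_list, never both. The two helpers below move it between the
 * lists and keep the device refcount in step.
 */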
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			 struct rds6_info_rdma_connection *iinfo6)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo6->rdma_mr_max = pool_1m->max_items;
	iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

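/*
 * Pop one MR off the pool's clean list, or return NULL if it is empty.
 * The per-CPU busy bit brackets llist_del_first() so that
 * wait_clean_list_grace() in the flush path can wait us out before
 * nodes are re-added to the list. The pairing looks like:
 *
 *	consumer (here)				flusher
 *	---------------				-------
 *	set_bit(CLEAN_LIST_BUSY_BIT)
 *	llist_del_first(&pool->clean_list)	wait_clean_list_grace()
 *	clear_bit(CLEAN_LIST_BUSY_BIT)		llist_add_batch(...)
 */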
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into a single linked
 * chain of llist nodes, handing back the head and tail of the chain
 * so the whole batch can be spliced onto the clean list at once.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

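/*
 * Call patterns used in this file (a summary of the call sites below,
 * not a new interface):
 *
 *	rds_ib_flush_mr_pool(pool, 0, NULL);	- lazy/background flush
 *	rds_ib_flush_mr_pool(pool, 1, NULL);	- pool teardown: drop everything
 *	rds_ib_flush_mr_pool(pool, 0, &ibmr);	- allocation path: may hand back
 *						  a recycled MR via *ibmr without
 *						  ever taking flush_lock
 */
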
/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	if (pool->use_fastreg)
		rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
	else
		rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off. The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

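/*
 * Get an MR for a new mapping. Fast path: recycle one off the clean
 * list. Otherwise claim a slot in item_count; if the pool is already
 * at max_items, drop the claim, flush synchronously and retry, giving
 * up with ERR_PTR(-EAGAIN) once the retries are exhausted.
 */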
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

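/*
 * Hand an MR back to its pool. The MR is only parked on the pool's
 * free (or drop) list here; unmapping is deferred to the flush worker
 * unless the caller asked for an invalidate, in which case we flush
 * immediately (or schedule it if we are in interrupt context).
 */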
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (rds_ibdev->use_fastreg)
		rds_ib_free_frmr_list(ibmr);
	else
		rds_ib_free_fmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

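/*
 * The transport's ->get_mr hook: register @sg with the device the
 * socket is bound to, returning the MR (its key in *key_ret), an
 * ERR_PTR from the registration path, or NULL if no usable device was
 * found. On success the MR keeps the device reference taken by
 * rds_ib_get_device(); otherwise it is dropped before returning.
 */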
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	if (rds_ibdev->use_fastreg)
		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	else
		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
	if (ibmr)
		rds_ibdev = NULL;

 out:
	if (!ibmr)
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);

	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ibmr;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

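/*
 * Worked example of the sizing below (hypothetical numbers, assuming
 * 4 KiB pages and RDS_MR_1M_MSG_SIZE == 256 pages): the 1M pool gets
 * fmr_attr.max_pages = 256 + 1 = 257, and with max_1m_mrs = 4096 the
 * flush threshold works out to
 *
 *	max_free_pinned = 4096 * 257 / 4 = 263168
 *
 * pinned pages allowed on the free lists before rds_ib_free_mr()
 * schedules a flush.
 */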
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
	pool->use_fastreg = rds_ibdev->use_fastreg;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}