/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds.h"
#include "ib.h"

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
        struct rds_ib_device *device;
        struct rds_ib_mr_pool *pool;
        struct ib_fmr *fmr;

        struct llist_node llnode;

        /* unmap_list is for freeing */
        struct list_head unmap_list;
        unsigned int remap_count;

        struct scatterlist *sg;
        unsigned int sg_len;
        u64 *dma;
        int sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
        struct mutex flush_lock;                /* serialize fmr invalidate */
        struct delayed_work flush_worker;       /* flush worker */

        atomic_t item_count;                    /* total # of MRs */
        atomic_t dirty_count;                   /* # of dirty MRs */

        struct llist_head drop_list;            /* MRs that have reached their max_maps limit */
        struct llist_head free_list;            /* unused MRs */
        struct llist_head clean_list;           /* global unused & unmapped MRs */
        wait_queue_head_t flush_wait;

        atomic_t free_pinned;                   /* memory pinned by free MRs */
        unsigned long max_items;
        unsigned long max_items_soft;
        unsigned long max_free_pinned;
        struct ib_fmr_attr fmr_attr;
};

struct workqueue_struct *rds_ib_fmr_wq;

int rds_ib_fmr_init(void)
{
        rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
        if (!rds_ib_fmr_wq)
                return -ENOMEM;
        return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
        destroy_workqueue(rds_ib_fmr_wq);
}

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

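/*
 * Look up the rds_ib_device that has @ipaddr on its ipaddr_list.  The
 * walk is done under RCU and takes a reference on the device; the
 * caller is expected to drop it with rds_ib_dev_put().
 */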
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        rcu_read_lock();
        list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                atomic_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
        }
        rcu_read_unlock();

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free) {
                synchronize_rcu();
                kfree(to_free);
        }
}

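/*
 * Bind @ipaddr to @rds_ibdev.  If the address is currently associated
 * with a different device it is moved over; if it is already on this
 * device there is nothing to do.
 */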
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr);
        if (!rds_ibdev_old)
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

        if (rds_ibdev_old != rds_ibdev) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
                rds_ib_dev_put(rds_ibdev_old);
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
        }
        rds_ib_dev_put(rds_ibdev_old);

        return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&ib_nodev_conns_lock);
        list_splice(&ib_nodev_conns, &tmp_list);
        spin_unlock_irq(&ib_nodev_conns_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        init_llist_head(&pool->free_list);
        init_llist_head(&pool->drop_list);
        init_llist_head(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

        /* We never allow more than max_items MRs to be allocated.
         * Once we go past max_items_soft, we start freeing
         * items more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2
         */
        pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
        pool->max_items = rds_ibdev->max_fmrs;

        return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

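/*
 * Try to grab an unused, unmapped MR off the pool's clean_list.  The
 * per-cpu CLEAN_LIST_BUSY_BIT is held around llist_del_first() so that
 * rds_ib_flush_mr_pool() can wait for a grace period before recycling
 * nodes back onto the clean list (see wait_clean_list_grace()).
 */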
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct llist_node *ret;
        unsigned long *flag;

        preempt_disable();
        flag = this_cpu_ptr(&clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
        ret = llist_del_first(&pool->clean_list);
        if (ret)
                ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
        return ibmr;
}

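/*
 * Spin until no CPU has its clean_list_grace busy bit set, i.e. no CPU
 * is currently inside llist_del_first() on the clean_list.
 */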
305static inline void wait_clean_list_grace(void)
306{
307 int cpu;
308 unsigned long *flag;
309
310 for_each_online_cpu(cpu) {
311 flag = &per_cpu(clean_list_grace, cpu);
312 while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
313 cpu_relax();
314 }
315}
316
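/*
 * Get an FMR for the caller: reuse a clean MR if one is available,
 * otherwise allocate a new one as long as the pool is below max_items,
 * flushing dirty MRs back to the clean list if we have to.  Returns
 * ERR_PTR(-EAGAIN) if the pool stays depleted.
 */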
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        struct rds_ib_mr *ibmr = NULL;
        int err = 0, iter = 0;

        if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                schedule_delayed_work(&pool->flush_worker, 10);

        while (1) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                        (IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE |
                         IB_ACCESS_REMOTE_ATOMIC),
                        &pool->fmr_attr);
        if (IS_ERR(ibmr->fmr)) {
                err = PTR_ERR(ibmr->fmr);
                ibmr->fmr = NULL;
                printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
                goto out_no_cigar;
        }

        rds_ib_stats_inc(s_ib_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                if (ibmr->fmr)
                        ib_dealloc_fmr(ibmr->fmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}

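/*
 * DMA-map the caller's scatterlist and hand the resulting page list to
 * ib_map_phys_fmr().  Only the first entry may start, and only the last
 * entry may end, at a non page-aligned address; anything else is
 * rejected with -EINVAL.  On success the old mapping (if any) is torn
 * down and the MR's remap_count is bumped.
 */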
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
                          struct scatterlist *sg, unsigned int nents)
{
        struct ib_device *dev = rds_ibdev->dev;
        struct scatterlist *scat = sg;
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt, sg_dma_len;
        int i, j;
        int ret;

        sg_dma_len = ib_dma_map_sg(dev, sg, nents,
                                   DMA_BIDIRECTIONAL);
        if (unlikely(!sg_dma_len)) {
                printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
                return -EBUSY;
        }

        len = 0;
        page_cnt = 0;

        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }

                len += dma_len;
        }

        page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size)
                return -EINVAL;

        dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
                                 rdsibdev_to_node(rds_ibdev));
        if (!dma_pages)
                return -ENOMEM;

        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
                                (dma_addr & PAGE_MASK) + j;
        }

        ret = ib_map_phys_fmr(ibmr->fmr,
                              dma_pages, page_cnt, io_addr);
        if (ret)
                goto out;

        /* Success - we successfully remapped the MR, so we can
         * safely tear down the old mapping. */
        rds_ib_teardown_mr(ibmr);

        ibmr->sg = scat;
        ibmr->sg_len = nents;
        ibmr->sg_dma_len = sg_dma_len;
        ibmr->remap_count++;

        rds_ib_stats_inc(s_ib_rdma_mr_used);
        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                                       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                                          ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_device *rds_ibdev = ibmr->device;
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

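/*
 * Number of MRs rds_ib_flush_mr_pool() should try to destroy outright:
 * all of them when free_all is set (pool teardown), zero otherwise.
 */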
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
                                         struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *node;
        struct llist_node *next;
        unsigned int count = 0;

        node = llist_del_all(llist);
        while (node) {
                next = node->next;
                ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                list_add_tail(&ibmr->unmap_list, list);
                node = next;
                count++;
        }
        return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters.  Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
                                struct list_head *list,
                                struct llist_node **nodes_head,
                                struct llist_node **nodes_tail)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *cur = NULL;
        struct llist_node **next = nodes_head;

        list_for_each_entry(ibmr, list, unmap_list) {
                cur = &ibmr->llnode;
                *next = cur;
                next = &cur->next;
        }
        *next = NULL;
        *nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                                int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr, *next;
        struct llist_node *clean_nodes;
        struct llist_node *clean_tail;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
        int ret = 0;

        rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (llist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
        dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
                llist_append_to_list(&pool->clean_list, &unmap_list);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
        list_for_each_entry(ibmr, &unmap_list, unmap_list)
                list_add(&ibmr->fmr->list, &fmr_list);

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

        /* Now we can destroy the DMA mapping and unpin any pages */
        list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
                unpinned += ibmr->sg_len;
                __rds_ib_teardown_mr(ibmr);
                if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
                        rds_ib_stats_inc(s_ib_rdma_mr_free);
                        list_del(&ibmr->unmap_list);
                        ib_dealloc_fmr(ibmr->fmr);
                        kfree(ibmr);
                        nfreed++;
                }
        }

        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other cpus trying
                 * to pull items off.  The llist would explode if we managed to
                 * remove something from the clean list and then add it back again
                 * while another CPU was spinning on that same item in llist_del_first.
                 *
                 * This is pretty unlikely, but just in case wait for an llist grace period
                 * here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();

                list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
                if (ibmr_ret)
                        *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

                /* more than one entry in llist nodes */
                if (clean_nodes->next)
                        llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);

        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(dirty_to_clean, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                llist_add(&ibmr->llnode, &pool->drop_list);
        else
                llist_add(&ibmr->llnode, &pool->free_list);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created an MR marked
                         * as use_once and invalidate at the same time.
                         */
                        queue_delayed_work(rds_ib_fmr_wq,
                                           &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}

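/* Flush the FMR pool of every device on the rds_ib_devices list. */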
void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        down_read(&rds_ib_devices_lock);
        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                if (pool)
                        rds_ib_flush_mr_pool(pool, 0, NULL);
        }
        up_read(&rds_ib_devices_lock);
}

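/*
 * Set up an MR for the given scatterlist: look up the rds_ib_device the
 * socket is bound to, allocate an FMR from its pool, map the pages and
 * return the rkey through *key_ret.  Returns an ERR_PTR() on failure,
 * releasing any partially set up MR.
 */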
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_ibdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_ib_alloc_fmr(rds_ibdev);
        if (IS_ERR(ibmr)) {
                rds_ib_dev_put(rds_ibdev);
                return ibmr;
        }

        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->fmr->rkey;
        else
                printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

        ibmr->device = rds_ibdev;
        rds_ibdev = NULL;

 out:
        if (ret) {
                if (ibmr)
                        rds_ib_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);
        return ibmr;
}