/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds.h"
#include "ib.h"

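/*
 * Each CPU sets CLEAN_LIST_BUSY_BIT in its clean_list_grace flag while
 * it pops an MR off a pool's clean_list; wait_clean_list_grace() spins
 * until every CPU has cleared the bit, so nothing is re-added to the
 * list while another CPU may still be touching a just-popped node.
 */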
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device *device;
	struct rds_ib_mr_pool *pool;
	struct ib_fmr *fmr;

	struct llist_node llnode;

	/* unmap_list is for freeing */
	struct list_head unmap_list;
	unsigned int remap_count;

	struct scatterlist *sg;
	unsigned int sg_len;
	u64 *dma;
	int sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex flush_lock;		/* serialize fmr invalidate */
	struct delayed_work flush_worker;	/* flush worker */

	atomic_t item_count;			/* total # of MRs */
	atomic_t dirty_count;			/* # of dirty MRs */

	struct llist_head drop_list;		/* MRs that have reached their max_maps limit */
	struct llist_head free_list;		/* unused MRs */
	struct llist_head clean_list;		/* global unused & unmapped MRs */
	wait_queue_head_t flush_wait;

	atomic_t free_pinned;			/* memory pinned by free MRs */
	unsigned long max_items;
	unsigned long max_items_soft;
	unsigned long max_free_pinned;
	struct ib_fmr_attr fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

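/*
 * Find the rds_ib_device bound to an IP address. The lookup runs under
 * RCU; on a hit the device's refcount is taken and the caller must
 * release it with rds_ib_dev_put().
 */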
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

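/*
 * Readers walk ipaddr_list under RCU, so after unlinking an entry we
 * must wait out a grace period before freeing it.
 */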
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
	}

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

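/*
 * Move a connection from the global ib_nodev_conns list onto its
 * device's conn_list, taking a reference on the device.
 */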
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

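/*
 * The inverse of rds_ib_add_conn(): detach the connection from its
 * device, park it on ib_nodev_conns, and drop the device reference.
 */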
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we go over max_items_soft, we start freeing items
	 * more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

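/*
 * Tear the pool down: cancel any pending flush work, flush every MR
 * (free_all), and warn if any MRs or pinned pages went unaccounted for.
 */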
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

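/*
 * Grab an unused, unmapped MR off the clean list, if one is available.
 * The per-cpu busy bit brackets llist_del_first() so that
 * wait_clean_list_grace() can wait out concurrent removals.
 */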
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = &__get_cpu_var(clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret)
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

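/*
 * Wait until no CPU is inside the llist_del_first() critical section
 * above before re-adding entries to the clean list.
 */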
static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

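/*
 * Allocate an MR for rds_ib_get_mr(): reuse a clean one when possible,
 * otherwise allocate a fresh FMR, flushing the pool if the driver's
 * limit has been reached.
 */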
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		schedule_delayed_work(&pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	/* kzalloc_node() already zeroes the MR, so no extra memset is needed */
	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

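/*
 * Program the FMR with the pages backing the scatterlist. Every entry
 * except the first must start on a page boundary, and every entry
 * except the last must end on one, because the FMR maps whole pages.
 */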
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			      dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Given an llist of MRs, put them all on the list_head for further processing.
 */
static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
	}
}

/*
 * This takes a list head of MRs and turns it into a chain of linked
 * llist nodes, returning the head and tail so the whole chain can be
 * spliced onto a pool llist with a single llist_add_batch() call.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
				int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr, *next;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

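	/*
	 * A caller that just needs an MR (ibmr_ret != NULL) does not
	 * queue up behind an in-progress flush. Instead it polls the
	 * clean list while sleeping on flush_wait, since the flusher
	 * will repopulate clean_list and wake us when it finishes.
	 */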
	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	llist_append_to_list(&pool->drop_list, &unmap_list);
	llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off. The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

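/*
 * Called by RDS core when an MR is released. The MR is parked on the
 * pool's drop_list (no remaps left) or free_list rather than destroyed;
 * a delayed flush is scheduled once too much memory is pinned, and a
 * synchronous flush runs for invalidate requests outside interrupt
 * context.
 */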
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		schedule_delayed_work(&pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			schedule_delayed_work(&pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr)) {
		/* Drop the device reference taken by rds_ib_get_device();
		 * returning without this would leak it. */
		rds_ib_dev_put(rds_ibdev);
		return ibmr;
	}

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}