/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"
#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)
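/*
 * CM MAD paravirtualization for SR-IOV: the master remaps each slave's
 * local comm id (sl_cm_id) to a host-unique paravirtual id (pv_cm_id) on
 * the way out, and reverses the mapping on the way back in. Mappings live
 * both in an rb-tree keyed by (sl_cm_id, slave_id) and in an xarray keyed
 * by pv_cm_id, and are garbage-collected CM_CLEANUP_CACHE_TIMEOUT after
 * the connection is torn down.
 */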
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id; /* the original cm id if applicable */
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct xarray *xa_rej_tmout;
};
struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};
static struct workqueue_struct *cm_wq;
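/*
 * Accessors for the comm id fields of a CM MAD. SIDR REQ/REP MADs carry a
 * single request_id instead of local/remote comm ids, so the accessors
 * special-case them and flag mismatched accesses with pr_err().
 */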
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}
static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}
static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}
static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}
/* Lock should be taken before called */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}
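/*
 * Delayed-work handler: drops an id map entry from the xarray and the
 * rb-tree once its cleanup timeout fires. If the pv_cm_id was already
 * erased from the xarray, the rb-tree lookup is skipped and the entry
 * is just unlinked and freed.
 */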
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}
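/*
 * Insert a new mapping into the rb-tree, keyed by (sl_cm_id, slave_id).
 * An existing entry for the same key is replaced in place rather than
 * duplicated. Caller must hold id_map_lock.
 */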
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}
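/*
 * Allocate an id map entry for (slave_id, sl_cm_id) and assign it a
 * cyclically-allocated 32-bit pv_cm_id from the xarray; on success the
 * entry is also linked into the rb-tree and the per-device cm_list.
 */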
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			      xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int)ent->pv_cm_id;
	} else {
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	}
	spin_unlock(&sriov->id_map_lock);

	return ent;
}
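/*
 * Arm (or re-arm) the cleanup timeout for an id map entry. Nothing is
 * scheduled while the device is going down; a second call while a delete
 * is already pending just pushes the deadline out.
 */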
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* make sure that there is no schedule inside the scheduled work */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}
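/*
 * Slave -> wire direction: rewrite the local comm id of a slave-originated
 * CM MAD to its paravirtual id, allocating a fresh mapping for
 * connection-establishing MADs (REQ/REP/MRA/SIDR_REQ and timeout REJs).
 */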
#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}
	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}
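/*
 * Expiry handler for rej_tmout entries: removes the entry from its xarray
 * (unless it was already replaced by a newer entry) and frees it.
 */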
static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);
	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}
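/*
 * Remember which slave a REQ/SIDR_REQ was forwarded to, keyed by the
 * requester-side pv comm id, so that a later REJ with reason
 * IB_CM_REJ_TIMEOUT can still be demultiplexed to the right slave even
 * though no id map entry exists. Entries expire via rej_tmout_timeout().
 */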
static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	struct rej_tmout_entry *old;
	int ret = 0;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
	if (item) {
		if (xa_err(item))
			ret = xa_err(item);
		else
			/* If a retry, adjust delayed work */
			mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;

	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
		pr_debug("Non-null old entry (%p) or error (%d) when inserting\n",
			 old, xa_err(old));
		kfree(item);
		return xa_err(old);
	}

	queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_or_exists:
	xa_unlock(&sriov->xa_rej_tmout);
	return ret;
}
static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;
	int slave;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, xa_err(item));
		slave = !item ? -ENOENT : xa_err(item);
	} else {
		slave = item->slave;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	return slave;
}
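/*
 * Wire -> slave direction: map the remote comm id of an incoming CM MAD
 * back to the owning slave and restore that slave's original comm id.
 * REQ/SIDR_REQ MADs are routed by GID instead, since no mapping exists yet.
 */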
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}
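/*
 * Per-device init of the paravirt CM state; pv_id_table is created with
 * XA_FLAGS_ALLOC so xa_alloc_cyclic() can hand out pv_cm_ids.
 */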
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
}
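/*
 * Force-expire rej_tmout entries for one slave (or all, if slave < 0) by
 * collapsing their delayed work to run immediately, then flushing cm_wq
 * so rej_tmout_timeout() has freed them before we return.
 */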
static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct rej_tmout_entry *item;
	bool flush_needed = false;
	unsigned long id;
	int cnt = 0;

	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(cm_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_workqueue(cm_wq);
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}

	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}
/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}
	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_workqueue(cm_wq); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}

		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}
	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_xa_cleanup(sriov, slave);
}
int mlx4_ib_cm_init(void)
{
	cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
	if (!cm_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_cm_destroy(void)
{
	destroy_workqueue(cm_wq);
}