/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static const char * const ibcm_rej_reason_strs[] = {
        [IB_CM_REJ_NO_QP]                       = "no QP",
        [IB_CM_REJ_NO_EEC]                      = "no EEC",
        [IB_CM_REJ_NO_RESOURCES]                = "no resources",
        [IB_CM_REJ_TIMEOUT]                     = "timeout",
        [IB_CM_REJ_UNSUPPORTED]                 = "unsupported",
        [IB_CM_REJ_INVALID_COMM_ID]             = "invalid comm ID",
        [IB_CM_REJ_INVALID_COMM_INSTANCE]       = "invalid comm instance",
        [IB_CM_REJ_INVALID_SERVICE_ID]          = "invalid service ID",
        [IB_CM_REJ_INVALID_TRANSPORT_TYPE]      = "invalid transport type",
        [IB_CM_REJ_STALE_CONN]                  = "stale conn",
        [IB_CM_REJ_RDC_NOT_EXIST]               = "RDC not exist",
        [IB_CM_REJ_INVALID_GID]                 = "invalid GID",
        [IB_CM_REJ_INVALID_LID]                 = "invalid LID",
        [IB_CM_REJ_INVALID_SL]                  = "invalid SL",
        [IB_CM_REJ_INVALID_TRAFFIC_CLASS]       = "invalid traffic class",
        [IB_CM_REJ_INVALID_HOP_LIMIT]           = "invalid hop limit",
        [IB_CM_REJ_INVALID_PACKET_RATE]         = "invalid packet rate",
        [IB_CM_REJ_INVALID_ALT_GID]             = "invalid alt GID",
        [IB_CM_REJ_INVALID_ALT_LID]             = "invalid alt LID",
        [IB_CM_REJ_INVALID_ALT_SL]              = "invalid alt SL",
        [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]   = "invalid alt traffic class",
        [IB_CM_REJ_INVALID_ALT_HOP_LIMIT]       = "invalid alt hop limit",
        [IB_CM_REJ_INVALID_ALT_PACKET_RATE]     = "invalid alt packet rate",
        [IB_CM_REJ_PORT_CM_REDIRECT]            = "port CM redirect",
        [IB_CM_REJ_PORT_REDIRECT]               = "port redirect",
        [IB_CM_REJ_INVALID_MTU]                 = "invalid MTU",
        [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
        [IB_CM_REJ_CONSUMER_DEFINED]            = "consumer defined",
        [IB_CM_REJ_INVALID_RNR_RETRY]           = "invalid RNR retry",
        [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]     = "duplicate local comm ID",
        [IB_CM_REJ_INVALID_CLASS_VERSION]       = "invalid class version",
        [IB_CM_REJ_INVALID_FLOW_LABEL]          = "invalid flow label",
        [IB_CM_REJ_INVALID_ALT_FLOW_LABEL]      = "invalid alt flow label",
};
const char *__attribute_const__ ibcm_reject_msg(int reason)
{
        size_t index = reason;

        if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
            ibcm_rej_reason_strs[index])
                return ibcm_rej_reason_strs[index];

        return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);
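
/*
 * Example (illustrative sketch, not part of this file): a consumer's
 * event handler could log a human-readable REJ reason, assuming a
 * hypothetical handler that receives an IB_CM_REJ_RECEIVED event:
 *
 *      if (event->event == IB_CM_REJ_RECEIVED)
 *              pr_debug("connection rejected: %s\n",
 *                       ibcm_reject_msg(event->param.rej_rcvd.reason));
 */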

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        __be32 random_id_operand;
        struct list_head timewait_list;
        struct workqueue_struct *wq;
        /* Sync on cm change port state */
        spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
        CM_REQ_COUNTER,
        CM_MRA_COUNTER,
        CM_REJ_COUNTER,
        CM_REP_COUNTER,
        CM_RTU_COUNTER,
        CM_DREQ_COUNTER,
        CM_DREP_COUNTER,
        CM_SIDR_REQ_COUNTER,
        CM_SIDR_REP_COUNTER,
        CM_LAP_COUNTER,
        CM_APR_COUNTER,
        CM_ATTR_COUNT,
        CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
        CM_XMIT,
        CM_XMIT_RETRIES,
        CM_RECV,
        CM_RECV_DUPLICATES,
        CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
                                     [sizeof("cm_rx_duplicates")] = {
        "cm_tx_msgs", "cm_tx_retries",
        "cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
        struct kobject obj;
        atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
        struct attribute attr;
        int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
        .attr = { .name = __stringify(_name), .mode = 0444 }, \
        .index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
        &cm_req_counter_attr.attr,
        &cm_mra_counter_attr.attr,
        &cm_rej_counter_attr.attr,
        &cm_rep_counter_attr.attr,
        &cm_rtu_counter_attr.attr,
        &cm_dreq_counter_attr.attr,
        &cm_drep_counter_attr.attr,
        &cm_sidr_req_counter_attr.attr,
        &cm_sidr_rep_counter_attr.attr,
        &cm_lap_counter_attr.attr,
        &cm_apr_counter_attr.attr,
        NULL
};

struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        struct kobject port_obj;
        u8 port_num;
        struct list_head cm_priv_prim_list;
        struct list_head cm_priv_altr_list;
        struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
        struct list_head list;
        struct ib_device *ib_device;
        struct device *device;
        u8 ack_delay;
        int going_down;
        struct cm_port *port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct rdma_ah_attr ah_attr;
        u16 pkey_index;
        u8 timeout;
};

struct cm_work {
        struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct list_head list;
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;
        /* Number of clients sharing this ib_cm_id. Only valid for listeners.
         * Protected by the cm.lock spinlock. */
        int listen_sharecount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        __be16 pkey;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;
        u8 target_ack_delay;

        struct list_head prim_list;
        struct list_head altr_list;
        /* Indicates that the send port mad is registered and av is set */
        int prim_send_port_not_ready;
        int altr_send_port_not_ready;

        struct list_head work_list;
        atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;
        struct cm_av *av;
        unsigned long flags, flags2;
        int ret = 0;

        /* don't let the port be released until the agent is down */
        spin_lock_irqsave(&cm.state_lock, flags2);
        spin_lock_irqsave(&cm.lock, flags);
        if (!cm_id_priv->prim_send_port_not_ready)
                av = &cm_id_priv->av;
        else if (!cm_id_priv->altr_send_port_not_ready &&
                 (cm_id_priv->alt_av.port))
                av = &cm_id_priv->alt_av;
        else {
                pr_info("%s: not valid CM id\n", __func__);
                ret = -ENODEV;
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
        /* Make sure the port hasn't released the mad agent yet */
        mad_agent = cm_id_priv->av.port->mad_agent;
        if (!mad_agent) {
                pr_info("%s: not a valid MAD agent\n", __func__);
                ret = -ENODEV;
                goto out;
        }
        ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto out;
        }

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               av->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC,
                               IB_MGMT_BASE_VERSION);
        if (IS_ERR(m)) {
                rdma_destroy_ah(ah);
                ret = PTR_ERR(m);
                goto out;
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;

out:
        spin_unlock_irqrestore(&cm.state_lock, flags2);
        return ret;
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
                                                           struct ib_mad_recv_wc *mad_recv_wc)
{
        return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                                  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                                  GFP_ATOMIC,
                                  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
                                     struct ib_mad_recv_wc *mad_recv_wc,
                                     struct ib_mad_send_buf *msg)
{
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        msg->ah = ah;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        if (msg->ah)
                rdma_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        int ret;

        m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
        if (IS_ERR(m))
                return PTR_ERR(m);

        ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
        if (ret) {
                cm_free_msg(m);
                return ret;
        }

        *msg = m;
        return 0;
}

static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmemdup(private_data, private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
                           grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
                              struct cm_id_private *cm_id_priv)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;
        struct net_device *ndev = ib_get_ndev_from_path(path);

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
                                        sa_conv_pathrec_to_gid_type(path),
                                        ndev, &p, NULL)) {
                        port = cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (ndev)
                dev_put(ndev);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
                             &av->ah_attr);
        av->timeout = path->packet_life_time + 1;

        spin_lock_irqsave(&cm.lock, flags);
        if (&cm_id_priv->av == av)
                list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
        else if (&cm_id_priv->alt_av == av)
                list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
        else
                ret = -EINVAL;
        spin_unlock_irqrestore(&cm.lock, flags);

        return ret;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&cm.lock, flags);

        id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

        spin_unlock_irqrestore(&cm.lock, flags);
        idr_preload_end();

        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
        return id < 0 ? id : 0;
}
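
/*
 * Note: the local_id handed to consumers is the IDR index XORed with the
 * boot-time random_id_operand, so consecutive allocations do not produce
 * consecutive wire-visible communication IDs.  For example, with
 * random_id_operand == 0x12345678, IDR index 5 becomes local_id
 * 0x1234567d; cm_free_id() and cm_get_id() below undo the XOR to recover
 * the index.
 */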

static void cm_free_id(__be32 local_id)
{
        spin_lock_irq(&cm.lock);
        idr_remove(&cm.local_id_table,
                   (__force int) (local_id ^ cm.random_id_operand));
        spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table,
                              (__force int) (local_id ^ cm.random_id_operand));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        spin_lock_irq(&cm.lock);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irq(&cm.lock);

        return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
        return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
        return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
        return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
        return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_left;
                else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_right;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}
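
/*
 * cm_insert_listen() above orders its tree by (device, service_id); the
 * masked-match test runs during the descent so that an overlapping listen
 * on the same device is returned to the caller instead of being inserted
 * a second time.
 */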

static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device))
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (be64_lt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_left;
                else if (be64_gt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_right;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (be32_lt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_left;
                else if (be32_gt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_right;
                else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_left;
                else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->prim_list);
        INIT_LIST_HEAD(&cm_id_priv->altr_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}
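
/*
 * Worked example: iba_time = 19 encodes 4.096us * 2^19 ~= 2.15s; since
 * 4.096us * 2^8 ~= 1.05ms, the approximation 1 << (19 - 8) = 2048ms is
 * within a few percent of the exact value.
 */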

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
        int ack_timeout = packet_life_time + 1;

        if (ack_timeout >= ca_ack_delay)
                ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
        else
                ack_timeout = ca_ack_delay +
                              (ack_timeout >= (ca_ack_delay - 1));

        return min(31, ack_timeout);
}
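
/*
 * Worked example: ca_ack_delay = 15, packet_life_time = 14.  The exact
 * identity 4.096us*2^x = 4.096us*2^15 + 2*4.096us*2^14 gives x = 16; the
 * code computes ack_timeout = 14 + 1 = 15, sees ca_ack_delay (15) >= 14,
 * and rounds up to 16 as well.
 */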

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;
        unsigned long flags;
        struct cm_device *cm_dev;

        cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
        if (!cm_dev)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
        list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
        spin_unlock_irqrestore(&cm.lock, flags);

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

        /* Check if the device started its remove_one */
        spin_lock_irqsave(&cm.lock, flags);
        if (!cm_dev->going_down)
                queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                                   msecs_to_jiffies(wait_time));
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;

        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                spin_lock_irqsave(&cm.lock, flags);
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                spin_unlock_irq(&cm_id_priv->lock);

                spin_lock_irq(&cm.lock);
                if (--cm_id_priv->listen_sharecount > 0) {
                        /* The id is still shared. */
                        cm_deref_id(cm_id_priv);
                        spin_unlock_irq(&cm.lock);
                        return;
                }
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                spin_lock_irq(&cm.lock);
                if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
                        rb_erase(&cm_id_priv->sidr_id_node,
                                 &cm.remote_sidr_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->id.device->node_guid,
                               sizeof cm_id_priv->id.device->node_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irq(&cm_id_priv->lock);
                } else {
                        spin_unlock_irq(&cm_id_priv->lock);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irq(&cm_id_priv->lock);
                if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
                        break;
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        }

        spin_lock_irq(&cm.lock);
        if (!list_empty(&cm_id_priv->altr_list) &&
            (!cm_id_priv->altr_send_port_not_ready))
                list_del(&cm_id_priv->altr_list);
        if (!list_empty(&cm_id_priv->prim_list) &&
            (!cm_id_priv->prim_send_port_not_ready))
                list_del(&cm_id_priv->prim_list);
        spin_unlock_irq(&cm.lock);

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to the service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
                          __be64 service_mask)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        int ret = 0;

        service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        cm_id->state = IB_CM_LISTEN;
        ++cm_id_priv->listen_sharecount;

        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = ~cpu_to_be64(0);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                --cm_id_priv->listen_sharecount;
                ret = -EBUSY;
        }
        return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm.lock, flags);
        ret = __ib_cm_listen(cm_id, service_id, service_mask);
        spin_unlock_irqrestore(&cm.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
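
/*
 * Usage sketch (illustrative, not part of this file): a kernel ULP with a
 * hypothetical handler my_cm_handler() and context my_ctx could start
 * listening on a fixed service ID roughly as follows:
 *
 *      struct ib_cm_id *id;
 *
 *      id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ret = ib_cm_listen(id, cpu_to_be64(0x1000ULL), 0);
 *      if (ret)
 *              ib_destroy_cm_id(id);
 */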

/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 * be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
                                     ib_cm_handler cm_handler,
                                     __be64 service_id)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_id *cm_id;
        unsigned long flags;
        int err = 0;

        /* Create an ID in advance, since the creation may sleep */
        cm_id = ib_create_cm_id(device, cm_handler, NULL);
        if (IS_ERR(cm_id))
                return cm_id;

        spin_lock_irqsave(&cm.lock, flags);

        if (service_id == IB_CM_ASSIGN_SERVICE_ID)
                goto new_id;

        /* Find an existing ID */
        cm_id_priv = cm_find_listen(device, service_id);
        if (cm_id_priv) {
                if (cm_id->cm_handler != cm_handler || cm_id->context) {
                        /* Sharing an ib_cm_id with different handlers is not
                         * supported */
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return ERR_PTR(-EINVAL);
                }
                atomic_inc(&cm_id_priv->refcount);
                ++cm_id_priv->listen_sharecount;
                spin_unlock_irqrestore(&cm.lock, flags);

                ib_destroy_cm_id(cm_id);
                cm_id = &cm_id_priv->id;
                return cm_id;
        }

new_id:
        /* Use newly created ID */
        err = __ib_cm_listen(cm_id, service_id, 0);

        spin_unlock_irqrestore(&cm.lock, flags);

        if (err) {
                ib_destroy_cm_id(cm_id);
                return ERR_PTR(err);
        }
        return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
                         (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}
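
/*
 * The resulting TID layout: bits 63..32 carry the MAD agent's hi_tid, and
 * the low 32 bits are the local communication ID OR'ed with the message
 * sequence in bits 31..30, so replies can be demultiplexed back to the
 * owning agent and cm_id.
 */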

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        struct sa_path_rec *pri_path = param->primary_path;
        struct sa_path_rec *alt_path = param->alternate_path;
        bool pri_ext = false;

        if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
                pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
                                              pri_path->opa.slid);

        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

        if (param->qp_type != IB_QPT_XRC_INI) {
                cm_req_set_resp_res(req_msg, param->responder_resources);
                cm_req_set_retry_count(req_msg, param->retry_count);
                cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
                cm_req_set_srq(req_msg, param->srq);
        }

        req_msg->primary_local_gid = pri_path->sgid;
        req_msg->primary_remote_gid = pri_path->dgid;
        if (pri_ext) {
                req_msg->primary_local_gid.global.interface_id
                        = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
                req_msg->primary_remote_gid.global.interface_id
                        = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
        }
        if (pri_path->hop_limit <= 1) {
                req_msg->primary_local_lid = pri_ext ? 0 :
                        htons(ntohl(sa_path_get_slid(pri_path)));
                req_msg->primary_remote_lid = pri_ext ? 0 :
                        htons(ntohl(sa_path_get_dlid(pri_path)));
        } else {
                /* Work-around until there's a way to obtain remote LID info */
                req_msg->primary_local_lid = IB_LID_PERMISSIVE;
                req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
        }
        cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
        req_msg->primary_traffic_class = pri_path->traffic_class;
        req_msg->primary_hop_limit = pri_path->hop_limit;
        cm_req_set_primary_sl(req_msg, pri_path->sl);
        cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
        cm_req_set_primary_local_ack_timeout(req_msg,
                cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                               pri_path->packet_life_time));

        if (alt_path) {
                bool alt_ext = false;

                if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
                        alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
                                                      alt_path->opa.slid);

                req_msg->alt_local_gid = alt_path->sgid;
                req_msg->alt_remote_gid = alt_path->dgid;
                if (alt_ext) {
                        req_msg->alt_local_gid.global.interface_id
                                = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
                        req_msg->alt_remote_gid.global.interface_id
                                = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
                }
                if (alt_path->hop_limit <= 1) {
                        req_msg->alt_local_lid = alt_ext ? 0 :
                                htons(ntohl(sa_path_get_slid(alt_path)));
                        req_msg->alt_remote_lid = alt_ext ? 0 :
                                htons(ntohl(sa_path_get_dlid(alt_path)));
                } else {
                        req_msg->alt_local_lid = IB_LID_PERMISSIVE;
                        req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
                }
                cm_req_set_alt_flow_label(req_msg,
                                          alt_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
                req_msg->alt_traffic_class = alt_path->traffic_class;
                req_msg->alt_hop_limit = alt_path->hop_limit;
                cm_req_set_alt_sl(req_msg, alt_path->sl);
                cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
                cm_req_set_alt_local_ack_timeout(req_msg,
                        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                                       alt_path->packet_life_time));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
            param->qp_type != IB_QPT_XRC_INI)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
                                 cm_id_priv);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av, cm_id_priv);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = ~cpu_to_be64(0);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->pkey = param->primary_path->pkey;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
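
/*
 * Usage sketch (illustrative, not part of this file): an active-side
 * caller fills struct ib_cm_req_param around a resolved path record and a
 * hypothetical QP (my_qp) before posting the REQ:
 *
 *      struct ib_cm_req_param req = {
 *              .primary_path           = &path_rec,
 *              .service_id             = cpu_to_be64(0x1000ULL),
 *              .qp_num                 = my_qp->qp_num,
 *              .qp_type                = IB_QPT_RC,
 *              .starting_psn           = prandom_u32() & 0xffffff,
 *              .responder_resources    = 4,
 *              .initiator_depth        = 4,
 *              .remote_cm_response_timeout = 20,
 *              .local_cm_response_timeout  = 20,
 *              .retry_count            = 7,
 *              .rnr_retry_count        = 7,
 *              .max_cm_retries         = 15,
 *      };
 *
 *      ret = ib_send_cm_req(cm_id, &req);
 */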

static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
        return ((req_msg->alt_local_lid) ||
                (ib_is_opa_gid(&req_msg->alt_local_gid)));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
                                 struct sa_path_rec *path, union ib_gid *gid)
{
        if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
                path->rec_type = SA_PATH_REC_TYPE_OPA;
        else
                path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
                                        struct sa_path_rec *primary_path,
                                        struct sa_path_rec *alt_path)
{
        u32 lid;

        if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
                sa_path_set_dlid(primary_path,
                                 htonl(ntohs(req_msg->primary_local_lid)));
                sa_path_set_slid(primary_path,
                                 htonl(ntohs(req_msg->primary_remote_lid)));
        } else {
                lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
                sa_path_set_dlid(primary_path, cpu_to_be32(lid));

                lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
                sa_path_set_slid(primary_path, cpu_to_be32(lid));
        }

        if (!cm_req_has_alt_path(req_msg))
                return;

        if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
                sa_path_set_dlid(alt_path,
                                 htonl(ntohs(req_msg->alt_local_lid)));
                sa_path_set_slid(alt_path,
                                 htonl(ntohs(req_msg->alt_remote_lid)));
        } else {
                lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
                sa_path_set_dlid(alt_path, cpu_to_be32(lid));

                lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
                sa_path_set_slid(alt_path, cpu_to_be32(lid));
        }
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                     struct sa_path_rec *primary_path,
                                     struct sa_path_rec *alt_path)
{
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
        primary_path->service_id = req_msg->service_id;

        if (cm_req_has_alt_path(req_msg)) {
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
                alt_path->service_id = req_msg->service_id;
        }
        cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
        struct ib_device *ib_dev = work->port->cm_dev->ib_device;
        u8 port_num = work->port->port_num;
        u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
        u16 pkey;
        int ret;

        ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
        if (ret) {
                dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
                                     port_num, pkey_index, ret);
                return 0;
        }

        return pkey;
}

static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->bth_pkey = cm_get_bth_pkey(work);
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irq(&cm_id_priv->lock);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        int ret;

        atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
                        counter[CM_REQ_COUNTER]);

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irq(&cm_id_priv->lock);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irq(&cm_id_priv->lock);
free:   cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        struct ib_cm_id *cm_id;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for possible duplicate REQ. */
        spin_lock_irq(&cm.lock);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irq(&cm.lock);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Check for stale connections. */
        timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
        if (timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);

                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                if (cur_cm_id_priv) {
                        cm_id = &cur_cm_id_priv->id;
                        ib_send_cm_dreq(cm_id, NULL, 0);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id);
        if (!listen_cm_id_priv) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto out;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irq(&cm.lock);
out:
        return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
        if (!cm_req_get_primary_subnet_local(req_msg)) {
                if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->primary_local_lid = ib_lid_be16(wc->slid);
                        cm_req_set_primary_sl(req_msg, wc->sl);
                }

                if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }

        if (!cm_req_get_alt_subnet_local(req_msg)) {
                if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->alt_local_lid = ib_lid_be16(wc->slid);
                        cm_req_set_alt_sl(req_msg, wc->sl);
                }

                if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }
}

static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        const struct ib_global_route *grh;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                kfree(cm_id_priv->timewait_info);
                goto destroy;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = ~cpu_to_be64(0);

        cm_process_routed_req(req_msg, work->mad_recv_wc->wc);

        memset(&work->path[0], 0, sizeof(work->path[0]));
        memset(&work->path[1], 0, sizeof(work->path[1]));
        grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
        ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
                                work->port->port_num,
                                grh->sgid_index,
                                &gid, &gid_attr);
        if (!ret) {
                if (gid_attr.ndev) {
                        work->path[0].rec_type =
                                sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
                        sa_path_set_ifindex(&work->path[0],
                                            gid_attr.ndev->ifindex);
                        sa_path_set_ndev(&work->path[0],
                                         dev_net(gid_attr.ndev));
                        dev_put(gid_attr.ndev);
                } else {
                        cm_path_set_rec_type(work->port->cm_dev->ib_device,
                                             work->port->port_num,
                                             &work->path[0],
                                             &req_msg->primary_local_gid);
                }
                if (cm_req_has_alt_path(req_msg))
                        work->path[1].rec_type = work->path[0].rec_type;
                cm_format_paths_from_req(req_msg, &work->path[0],
                                         &work->path[1]);
                if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
                        sa_path_set_dmac(&work->path[0],
                                         cm_id_priv->av.ah_attr.roce.dmac);
                work->path[0].hop_limit = grh->hop_limit;
                ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
                                         cm_id_priv);
        }
        if (ret) {
                int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
                                            work->port->port_num, 0,
                                            &work->path[0].sgid,
                                            &gid_attr);
                if (!err && gid_attr.ndev) {
                        work->path[0].rec_type =
                                sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
                        sa_path_set_ifindex(&work->path[0],
                                            gid_attr.ndev->ifindex);
                        sa_path_set_ndev(&work->path[0],
                                         dev_net(gid_attr.ndev));
                        dev_put(gid_attr.ndev);
                } else {
                        cm_path_set_rec_type(work->port->cm_dev->ib_device,
                                             work->port->port_num,
                                             &work->path[0],
                                             &req_msg->primary_local_gid);
                }
                if (cm_req_has_alt_path(req_msg))
                        work->path[1].rec_type = work->path[0].rec_type;
                ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
                               &work->path[0].sgid, sizeof work->path[0].sgid,
                               NULL, 0);
                goto rejected;
        }
        if (cm_req_has_alt_path(req_msg)) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
                                         cm_id_priv);
                if (ret) {
                        ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
                                       &work->path[0].sgid,
                                       sizeof work->path[0].sgid, NULL, 0);
                        goto rejected;
                }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->pkey = req_msg->pkey;
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

rejected:
        atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
destroy:
        ib_destroy_cm_id(cm_id);
        return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        cm_rep_set_target_ack_delay(rep_msg,
                                    cm_id_priv->av.port->cm_dev->ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

        if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
                rep_msg->initiator_depth = param->initiator_depth;
                cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
                cm_rep_set_srq(rep_msg, param->srq);
                cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        } else {
                cm_rep_set_srq(rep_msg, 1);
                cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
        }

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}
1984 int ib_send_cm_rep(struct ib_cm_id *cm_id,
1985 struct ib_cm_rep_param *param)
1987 struct cm_id_private *cm_id_priv;
1988 struct ib_mad_send_buf *msg;
1989 struct cm_rep_msg *rep_msg;
1990 unsigned long flags;
1993 if (param->private_data &&
1994 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1997 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1998 spin_lock_irqsave(&cm_id_priv->lock, flags);
1999 if (cm_id->state != IB_CM_REQ_RCVD &&
2000 cm_id->state != IB_CM_MRA_REQ_SENT) {
2005 ret = cm_alloc_msg(cm_id_priv, &msg);
2009 rep_msg = (struct cm_rep_msg *) msg->mad;
2010 cm_format_rep(rep_msg, cm_id_priv, param);
2011 msg->timeout_ms = cm_id_priv->timeout_ms;
2012 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2014 ret = ib_post_send_mad(msg, NULL);
2016 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2021 cm_id->state = IB_CM_REP_SENT;
2022 cm_id_priv->msg = msg;
2023 cm_id_priv->initiator_depth = param->initiator_depth;
2024 cm_id_priv->responder_resources = param->responder_resources;
2025 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
2026 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2028 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2031 EXPORT_SYMBOL(ib_send_cm_rep);
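/*
 * Editor's illustration, not part of the original file: a minimal sketch of
 * how a passive-side ULP might accept a REQ by sending a REP.  The helper
 * name, the QP argument and the negotiated values are hypothetical; a real
 * consumer fills them from its own state.  The call is valid only while the
 * cm_id is in IB_CM_REQ_RCVD or IB_CM_MRA_REQ_SENT.
 */
static int example_accept_req(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_cm_rep_param rep = {
		.qp_num			= qp->qp_num,
		.starting_psn		= prandom_u32() & 0xffffff, /* 24-bit PSN */
		.responder_resources	= 4,
		.initiator_depth	= 4,
		.flow_control		= 1,
		.rnr_retry_count	= 7,	/* "infinite" RNR retries */
	};

	return ib_send_cm_rep(cm_id, &rep);
}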
2033 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2034 struct cm_id_private *cm_id_priv,
2035 const void *private_data,
2036 u8 private_data_len)
2038 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2039 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
2040 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
2042 if (private_data && private_data_len)
2043 memcpy(rtu_msg->private_data, private_data, private_data_len);
2046 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2047 const void *private_data,
2048 u8 private_data_len)
2050 struct cm_id_private *cm_id_priv;
2051 struct ib_mad_send_buf *msg;
2052 unsigned long flags;
2056 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2059 data = cm_copy_private_data(private_data, private_data_len);
2061 return PTR_ERR(data);
2063 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2064 spin_lock_irqsave(&cm_id_priv->lock, flags);
2065 if (cm_id->state != IB_CM_REP_RCVD &&
2066 cm_id->state != IB_CM_MRA_REP_SENT) {
2071 ret = cm_alloc_msg(cm_id_priv, &msg);
2075 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2076 private_data, private_data_len);
2078 ret = ib_post_send_mad(msg, NULL);
2080 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2086 cm_id->state = IB_CM_ESTABLISHED;
2087 cm_set_private_data(cm_id_priv, data, private_data_len);
2088 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2091 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2095 EXPORT_SYMBOL(ib_send_cm_rtu);
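/*
 * Editor's illustration, not part of the original file: the active side
 * confirms a received REP with an RTU, typically from its cm_handler.  A
 * non-zero return from the handler tells the CM to destroy the cm_id, so
 * propagating the ib_send_cm_rtu() error is the usual idiom.  The handler
 * skeleton is hypothetical.
 */
static int example_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	switch (event->event) {
	case IB_CM_REP_RECEIVED:
		/* Transition the QP to RTR/RTS first (see
		 * ib_cm_init_qp_attr() below), then confirm. */
		return ib_send_cm_rtu(cm_id, NULL, 0);
	default:
		return 0;
	}
}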
2097 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2099 struct cm_rep_msg *rep_msg;
2100 struct ib_cm_rep_event_param *param;
2102 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2103 param = &work->cm_event.param.rep_rcvd;
2104 param->remote_ca_guid = rep_msg->local_ca_guid;
2105 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
2106 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2107 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
2108 param->responder_resources = rep_msg->initiator_depth;
2109 param->initiator_depth = rep_msg->resp_resources;
2110 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2111 param->failover_accepted = cm_rep_get_failover(rep_msg);
2112 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
2113 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2114 param->srq = cm_rep_get_srq(rep_msg);
2115 work->cm_event.private_data = &rep_msg->private_data;
2118 static void cm_dup_rep_handler(struct cm_work *work)
2120 struct cm_id_private *cm_id_priv;
2121 struct cm_rep_msg *rep_msg;
2122 struct ib_mad_send_buf *msg = NULL;
2125 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2126 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2127 rep_msg->local_comm_id);
2131 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2132 counter[CM_REP_COUNTER]);
2133 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2137 spin_lock_irq(&cm_id_priv->lock);
2138 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2139 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2140 cm_id_priv->private_data,
2141 cm_id_priv->private_data_len);
2142 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2143 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2144 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2145 cm_id_priv->private_data,
2146 cm_id_priv->private_data_len);
2149 spin_unlock_irq(&cm_id_priv->lock);
2151 ret = ib_post_send_mad(msg, NULL);
2156 unlock: spin_unlock_irq(&cm_id_priv->lock);
2157 free: cm_free_msg(msg);
2158 deref: cm_deref_id(cm_id_priv);
2161 static int cm_rep_handler(struct cm_work *work)
2163 struct cm_id_private *cm_id_priv;
2164 struct cm_rep_msg *rep_msg;
2166 struct cm_id_private *cur_cm_id_priv;
2167 struct ib_cm_id *cm_id;
2168 struct cm_timewait_info *timewait_info;
2170 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2171 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2173 cm_dup_rep_handler(work);
2177 cm_format_rep_event(work, cm_id_priv->qp_type);
2179 spin_lock_irq(&cm_id_priv->lock);
2180 switch (cm_id_priv->id.state) {
2181 case IB_CM_REQ_SENT:
2182 case IB_CM_MRA_REQ_RCVD:
2185 spin_unlock_irq(&cm_id_priv->lock);
2190 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2191 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2192 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2194 spin_lock(&cm.lock);
2195 /* Check for duplicate REP. */
2196 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2197 spin_unlock(&cm.lock);
2198 spin_unlock_irq(&cm_id_priv->lock);
2202 /* Check for a stale connection. */
2203 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2204 if (timewait_info) {
2205 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2206 &cm.remote_id_table);
2207 cm_id_priv->timewait_info->inserted_remote_id = 0;
2208 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2209 timewait_info->work.remote_id);
2211 spin_unlock(&cm.lock);
2212 spin_unlock_irq(&cm_id_priv->lock);
2213 cm_issue_rej(work->port, work->mad_recv_wc,
2214 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2217 if (cur_cm_id_priv) {
2218 cm_id = &cur_cm_id_priv->id;
2219 ib_send_cm_dreq(cm_id, NULL, 0);
2220 cm_deref_id(cur_cm_id_priv);
2225 spin_unlock(&cm.lock);
2227 cm_id_priv->id.state = IB_CM_REP_RCVD;
2228 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2229 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2230 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2231 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2232 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2233 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2234 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2235 cm_id_priv->av.timeout =
2236 cm_ack_timeout(cm_id_priv->target_ack_delay,
2237 cm_id_priv->av.timeout - 1);
2238 cm_id_priv->alt_av.timeout =
2239 cm_ack_timeout(cm_id_priv->target_ack_delay,
2240 cm_id_priv->alt_av.timeout - 1);
2242 /* todo: handle peer_to_peer */
2244 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2245 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2247 list_add_tail(&work->list, &cm_id_priv->work_list);
2248 spin_unlock_irq(&cm_id_priv->lock);
2251 cm_process_work(cm_id_priv, work);
2253 cm_deref_id(cm_id_priv);
2257 cm_deref_id(cm_id_priv);
2261 static int cm_establish_handler(struct cm_work *work)
2263 struct cm_id_private *cm_id_priv;
2266 /* See comment in cm_establish about lookup. */
2267 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2271 spin_lock_irq(&cm_id_priv->lock);
2272 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2273 spin_unlock_irq(&cm_id_priv->lock);
2277 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2278 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2280 list_add_tail(&work->list, &cm_id_priv->work_list);
2281 spin_unlock_irq(&cm_id_priv->lock);
2284 cm_process_work(cm_id_priv, work);
2286 cm_deref_id(cm_id_priv);
2289 cm_deref_id(cm_id_priv);
2293 static int cm_rtu_handler(struct cm_work *work)
2295 struct cm_id_private *cm_id_priv;
2296 struct cm_rtu_msg *rtu_msg;
2299 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2300 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2301 rtu_msg->local_comm_id);
2305 work->cm_event.private_data = &rtu_msg->private_data;
2307 spin_lock_irq(&cm_id_priv->lock);
2308 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2309 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2310 spin_unlock_irq(&cm_id_priv->lock);
2311 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2312 counter[CM_RTU_COUNTER]);
2315 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2317 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2318 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2320 list_add_tail(&work->list, &cm_id_priv->work_list);
2321 spin_unlock_irq(&cm_id_priv->lock);
2324 cm_process_work(cm_id_priv, work);
2326 cm_deref_id(cm_id_priv);
2329 cm_deref_id(cm_id_priv);
2333 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2334 struct cm_id_private *cm_id_priv,
2335 const void *private_data,
2336 u8 private_data_len)
2338 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2339 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2340 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2341 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2342 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2344 if (private_data && private_data_len)
2345 memcpy(dreq_msg->private_data, private_data, private_data_len);
2348 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2349 const void *private_data,
2350 u8 private_data_len)
2352 struct cm_id_private *cm_id_priv;
2353 struct ib_mad_send_buf *msg;
2354 unsigned long flags;
2357 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2360 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2361 spin_lock_irqsave(&cm_id_priv->lock, flags);
2362 if (cm_id->state != IB_CM_ESTABLISHED) {
2367 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2368 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2369 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2371 ret = cm_alloc_msg(cm_id_priv, &msg);
2373 cm_enter_timewait(cm_id_priv);
2377 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2378 private_data, private_data_len);
2379 msg->timeout_ms = cm_id_priv->timeout_ms;
2380 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2382 ret = ib_post_send_mad(msg, NULL);
2384 cm_enter_timewait(cm_id_priv);
2385 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2390 cm_id->state = IB_CM_DREQ_SENT;
2391 cm_id_priv->msg = msg;
2392 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2395 EXPORT_SYMBOL(ib_send_cm_dreq);
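/*
 * Editor's illustration, not part of the original file: graceful teardown.
 * A DREQ is only valid in IB_CM_ESTABLISHED; once the peer's DREP (or a
 * retry timeout) arrives, the cm_id enters timewait and later reports
 * IB_CM_TIMEWAIT_EXIT.
 */
static void example_disconnect(struct ib_cm_id *cm_id)
{
	if (ib_send_cm_dreq(cm_id, NULL, 0))
		pr_debug("cm_id was not in the ESTABLISHED state\n");
}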
2397 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2398 struct cm_id_private *cm_id_priv,
2399 const void *private_data,
2400 u8 private_data_len)
2402 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2403 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2404 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2406 if (private_data && private_data_len)
2407 memcpy(drep_msg->private_data, private_data, private_data_len);
2410 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2411 const void *private_data,
2412 u8 private_data_len)
2414 struct cm_id_private *cm_id_priv;
2415 struct ib_mad_send_buf *msg;
2416 unsigned long flags;
2420 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2423 data = cm_copy_private_data(private_data, private_data_len);
2425 return PTR_ERR(data);
2427 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2428 spin_lock_irqsave(&cm_id_priv->lock, flags);
2429 if (cm_id->state != IB_CM_DREQ_RCVD) {
2430 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2435 cm_set_private_data(cm_id_priv, data, private_data_len);
2436 cm_enter_timewait(cm_id_priv);
2438 ret = cm_alloc_msg(cm_id_priv, &msg);
2442 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2443 private_data, private_data_len);
2445 ret = ib_post_send_mad(msg, NULL);
2447 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2452 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2455 EXPORT_SYMBOL(ib_send_cm_drep);
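/*
 * Editor's illustration, not part of the original file: answering a peer's
 * DREQ from the cm_handler.  ib_send_cm_drep() also moves the cm_id into
 * timewait, so the ULP only has to release its QP afterwards.
 */
static int example_on_dreq(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	if (event->event == IB_CM_DREQ_RECEIVED)
		return ib_send_cm_drep(cm_id, NULL, 0);
	return 0;
}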
2457 static int cm_issue_drep(struct cm_port *port,
2458 struct ib_mad_recv_wc *mad_recv_wc)
2460 struct ib_mad_send_buf *msg = NULL;
2461 struct cm_dreq_msg *dreq_msg;
2462 struct cm_drep_msg *drep_msg;
2465 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2469 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2470 drep_msg = (struct cm_drep_msg *) msg->mad;
2472 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2473 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2474 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2476 ret = ib_post_send_mad(msg, NULL);
2483 static int cm_dreq_handler(struct cm_work *work)
2485 struct cm_id_private *cm_id_priv;
2486 struct cm_dreq_msg *dreq_msg;
2487 struct ib_mad_send_buf *msg = NULL;
2490 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2491 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2492 dreq_msg->local_comm_id);
2494 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2495 counter[CM_DREQ_COUNTER]);
2496 cm_issue_drep(work->port, work->mad_recv_wc);
2500 work->cm_event.private_data = &dreq_msg->private_data;
2502 spin_lock_irq(&cm_id_priv->lock);
2503 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2506 switch (cm_id_priv->id.state) {
2507 case IB_CM_REP_SENT:
2508 case IB_CM_DREQ_SENT:
2509 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2511 case IB_CM_ESTABLISHED:
2512 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2513 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2514 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2516 case IB_CM_MRA_REP_RCVD:
2518 case IB_CM_TIMEWAIT:
2519 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2520 counter[CM_DREQ_COUNTER]);
2521 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2525 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2526 cm_id_priv->private_data,
2527 cm_id_priv->private_data_len);
2528 spin_unlock_irq(&cm_id_priv->lock);
2530 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2531 ib_post_send_mad(msg, NULL))
2534 case IB_CM_DREQ_RCVD:
2535 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2536 counter[CM_DREQ_COUNTER]);
2541 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2542 cm_id_priv->tid = dreq_msg->hdr.tid;
2543 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2545 list_add_tail(&work->list, &cm_id_priv->work_list);
2546 spin_unlock_irq(&cm_id_priv->lock);
2549 cm_process_work(cm_id_priv, work);
2551 cm_deref_id(cm_id_priv);
2554 unlock: spin_unlock_irq(&cm_id_priv->lock);
2555 deref: cm_deref_id(cm_id_priv);
2559 static int cm_drep_handler(struct cm_work *work)
2561 struct cm_id_private *cm_id_priv;
2562 struct cm_drep_msg *drep_msg;
2565 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2566 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2567 drep_msg->local_comm_id);
2571 work->cm_event.private_data = &drep_msg->private_data;
2573 spin_lock_irq(&cm_id_priv->lock);
2574 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2575 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2576 spin_unlock_irq(&cm_id_priv->lock);
2579 cm_enter_timewait(cm_id_priv);
2581 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2582 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2584 list_add_tail(&work->list, &cm_id_priv->work_list);
2585 spin_unlock_irq(&cm_id_priv->lock);
2588 cm_process_work(cm_id_priv, work);
2590 cm_deref_id(cm_id_priv);
2593 cm_deref_id(cm_id_priv);
2597 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2598 enum ib_cm_rej_reason reason,
2601 const void *private_data,
2602 u8 private_data_len)
2604 struct cm_id_private *cm_id_priv;
2605 struct ib_mad_send_buf *msg;
2606 unsigned long flags;
2609 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2610 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2613 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2615 spin_lock_irqsave(&cm_id_priv->lock, flags);
2616 switch (cm_id->state) {
2617 case IB_CM_REQ_SENT:
2618 case IB_CM_MRA_REQ_RCVD:
2619 case IB_CM_REQ_RCVD:
2620 case IB_CM_MRA_REQ_SENT:
2621 case IB_CM_REP_RCVD:
2622 case IB_CM_MRA_REP_SENT:
2623 ret = cm_alloc_msg(cm_id_priv, &msg);
2625 cm_format_rej((struct cm_rej_msg *) msg->mad,
2626 cm_id_priv, reason, ari, ari_length,
2627 private_data, private_data_len);
2629 cm_reset_to_idle(cm_id_priv);
2631 case IB_CM_REP_SENT:
2632 case IB_CM_MRA_REP_RCVD:
2633 ret = cm_alloc_msg(cm_id_priv, &msg);
2635 cm_format_rej((struct cm_rej_msg *) msg->mad,
2636 cm_id_priv, reason, ari, ari_length,
2637 private_data, private_data_len);
2639 cm_enter_timewait(cm_id_priv);
2649 ret = ib_post_send_mad(msg, NULL);
2653 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2656 EXPORT_SYMBOL(ib_send_cm_rej);
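/*
 * Editor's illustration, not part of the original file: rejecting an
 * incoming REQ for lack of resources.  No ARI (additional rejection info)
 * is supplied; when one is, its length must not exceed
 * IB_CM_REJ_ARI_LENGTH.
 */
static int example_reject_req(struct ib_cm_id *cm_id)
{
	return ib_send_cm_rej(cm_id, IB_CM_REJ_NO_RESOURCES,
			      NULL, 0, NULL, 0);
}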
2658 static void cm_format_rej_event(struct cm_work *work)
2660 struct cm_rej_msg *rej_msg;
2661 struct ib_cm_rej_event_param *param;
2663 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2664 param = &work->cm_event.param.rej_rcvd;
2665 param->ari = rej_msg->ari;
2666 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2667 param->reason = __be16_to_cpu(rej_msg->reason);
2668 work->cm_event.private_data = &rej_msg->private_data;
2671 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2673 struct cm_timewait_info *timewait_info;
2674 struct cm_id_private *cm_id_priv;
2677 remote_id = rej_msg->local_comm_id;
2679 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2680 spin_lock_irq(&cm.lock);
2681 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2683 if (!timewait_info) {
2684 spin_unlock_irq(&cm.lock);
2687 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2688 (timewait_info->work.local_id ^
2689 cm.random_id_operand));
2691 if (cm_id_priv->id.remote_id == remote_id)
2692 atomic_inc(&cm_id_priv->refcount);
2696 spin_unlock_irq(&cm.lock);
2697 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2698 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2700 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2705 static int cm_rej_handler(struct cm_work *work)
2707 struct cm_id_private *cm_id_priv;
2708 struct cm_rej_msg *rej_msg;
2711 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2712 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2716 cm_format_rej_event(work);
2718 spin_lock_irq(&cm_id_priv->lock);
2719 switch (cm_id_priv->id.state) {
2720 case IB_CM_REQ_SENT:
2721 case IB_CM_MRA_REQ_RCVD:
2722 case IB_CM_REP_SENT:
2723 case IB_CM_MRA_REP_RCVD:
2724 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2726 case IB_CM_REQ_RCVD:
2727 case IB_CM_MRA_REQ_SENT:
2728 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2729 cm_enter_timewait(cm_id_priv);
2731 cm_reset_to_idle(cm_id_priv);
2733 case IB_CM_DREQ_SENT:
2734 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2736 case IB_CM_REP_RCVD:
2737 case IB_CM_MRA_REP_SENT:
2738 cm_enter_timewait(cm_id_priv);
2740 case IB_CM_ESTABLISHED:
2741 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2742 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2743 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2744 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2746 cm_enter_timewait(cm_id_priv);
2751 spin_unlock_irq(&cm_id_priv->lock);
2756 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2758 list_add_tail(&work->list, &cm_id_priv->work_list);
2759 spin_unlock_irq(&cm_id_priv->lock);
2762 cm_process_work(cm_id_priv, work);
2764 cm_deref_id(cm_id_priv);
2767 cm_deref_id(cm_id_priv);
2771 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2773 const void *private_data,
2774 u8 private_data_len)
2776 struct cm_id_private *cm_id_priv;
2777 struct ib_mad_send_buf *msg;
2778 enum ib_cm_state cm_state;
2779 enum ib_cm_lap_state lap_state;
2780 enum cm_msg_response msg_response;
2782 unsigned long flags;
2785 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2788 data = cm_copy_private_data(private_data, private_data_len);
2790 return PTR_ERR(data);
2792 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2794 spin_lock_irqsave(&cm_id_priv->lock, flags);
2795 switch(cm_id_priv->id.state) {
2796 case IB_CM_REQ_RCVD:
2797 cm_state = IB_CM_MRA_REQ_SENT;
2798 lap_state = cm_id->lap_state;
2799 msg_response = CM_MSG_RESPONSE_REQ;
2801 case IB_CM_REP_RCVD:
2802 cm_state = IB_CM_MRA_REP_SENT;
2803 lap_state = cm_id->lap_state;
2804 msg_response = CM_MSG_RESPONSE_REP;
2806 case IB_CM_ESTABLISHED:
2807 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2808 cm_state = cm_id->state;
2809 lap_state = IB_CM_MRA_LAP_SENT;
2810 msg_response = CM_MSG_RESPONSE_OTHER;
2818 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2819 ret = cm_alloc_msg(cm_id_priv, &msg);
2823 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2824 msg_response, service_timeout,
2825 private_data, private_data_len);
2826 ret = ib_post_send_mad(msg, NULL);
2831 cm_id->state = cm_state;
2832 cm_id->lap_state = lap_state;
2833 cm_id_priv->service_timeout = service_timeout;
2834 cm_set_private_data(cm_id_priv, data, private_data_len);
2835 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2838 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2842 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2847 EXPORT_SYMBOL(ib_send_cm_mra);
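/*
 * Editor's illustration, not part of the original file: asking the peer to
 * wait while a REQ is processed.  The 5-bit service timeout of 24 is a
 * hypothetical value (roughly 4.096 usec * 2^24, on the order of a minute,
 * in the IB encoding); OR-ing in IB_CM_MRA_FLAG_DELAY would record the
 * timeout but defer sending the MRA until a duplicate message arrives.
 */
static int example_delay_req(struct ib_cm_id *cm_id)
{
	return ib_send_cm_mra(cm_id, 24, NULL, 0);
}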
2849 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2851 switch (cm_mra_get_msg_mraed(mra_msg)) {
2852 case CM_MSG_RESPONSE_REQ:
2853 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2854 case CM_MSG_RESPONSE_REP:
2855 case CM_MSG_RESPONSE_OTHER:
2856 return cm_acquire_id(mra_msg->remote_comm_id,
2857 mra_msg->local_comm_id);
2863 static int cm_mra_handler(struct cm_work *work)
2865 struct cm_id_private *cm_id_priv;
2866 struct cm_mra_msg *mra_msg;
2869 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2870 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2874 work->cm_event.private_data = &mra_msg->private_data;
2875 work->cm_event.param.mra_rcvd.service_timeout =
2876 cm_mra_get_service_timeout(mra_msg);
2877 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2878 cm_convert_to_ms(cm_id_priv->av.timeout);
2880 spin_lock_irq(&cm_id_priv->lock);
2881 switch (cm_id_priv->id.state) {
2882 case IB_CM_REQ_SENT:
2883 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2884 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2885 cm_id_priv->msg, timeout))
2887 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2889 case IB_CM_REP_SENT:
2890 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2891 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2892 cm_id_priv->msg, timeout))
2894 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2896 case IB_CM_ESTABLISHED:
2897 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2898 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2899 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2900 cm_id_priv->msg, timeout)) {
2901 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2902 atomic_long_inc(&work->port->
2903 counter_group[CM_RECV_DUPLICATES].
2904 counter[CM_MRA_COUNTER]);
2907 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2909 case IB_CM_MRA_REQ_RCVD:
2910 case IB_CM_MRA_REP_RCVD:
2911 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2912 counter[CM_MRA_COUNTER]);
2918 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2919 cm_id_priv->id.state;
2920 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2922 list_add_tail(&work->list, &cm_id_priv->work_list);
2923 spin_unlock_irq(&cm_id_priv->lock);
2926 cm_process_work(cm_id_priv, work);
2928 cm_deref_id(cm_id_priv);
2931 spin_unlock_irq(&cm_id_priv->lock);
2932 cm_deref_id(cm_id_priv);
2936 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2937 struct cm_id_private *cm_id_priv,
2938 struct sa_path_rec *alternate_path,
2939 const void *private_data,
2940 u8 private_data_len)
2942 bool alt_ext = false;
2944 if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
2945 alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
2946 alternate_path->opa.slid);
2947 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2948 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2949 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2950 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2951 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2952 /* todo: need remote CM response timeout */
2953 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2954 lap_msg->alt_local_lid =
2955 htons(ntohl(sa_path_get_slid(alternate_path)));
2956 lap_msg->alt_remote_lid =
2957 htons(ntohl(sa_path_get_dlid(alternate_path)));
2958 lap_msg->alt_local_gid = alternate_path->sgid;
2959 lap_msg->alt_remote_gid = alternate_path->dgid;
2961 lap_msg->alt_local_gid.global.interface_id
2962 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
2963 lap_msg->alt_remote_gid.global.interface_id
2964 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
2966 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2967 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2968 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2969 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2970 cm_lap_set_sl(lap_msg, alternate_path->sl);
2971 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2972 cm_lap_set_local_ack_timeout(lap_msg,
2973 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2974 alternate_path->packet_life_time));
2976 if (private_data && private_data_len)
2977 memcpy(lap_msg->private_data, private_data, private_data_len);
2980 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2981 struct sa_path_rec *alternate_path,
2982 const void *private_data,
2983 u8 private_data_len)
2985 struct cm_id_private *cm_id_priv;
2986 struct ib_mad_send_buf *msg;
2987 unsigned long flags;
2990 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2993 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2994 spin_lock_irqsave(&cm_id_priv->lock, flags);
2995 if (cm_id->state != IB_CM_ESTABLISHED ||
2996 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2997 cm_id->lap_state != IB_CM_LAP_IDLE)) {
3002 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
3006 cm_id_priv->alt_av.timeout =
3007 cm_ack_timeout(cm_id_priv->target_ack_delay,
3008 cm_id_priv->alt_av.timeout - 1);
3010 ret = cm_alloc_msg(cm_id_priv, &msg);
3014 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
3015 alternate_path, private_data, private_data_len);
3016 msg->timeout_ms = cm_id_priv->timeout_ms;
3017 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
3019 ret = ib_post_send_mad(msg, NULL);
3021 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3026 cm_id->lap_state = IB_CM_LAP_SENT;
3027 cm_id_priv->msg = msg;
3029 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3032 EXPORT_SYMBOL(ib_send_cm_lap);
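/*
 * Editor's illustration, not part of the original file: loading an
 * alternate path onto an established connection.  The sa_path_rec would
 * normally come from an ib_sa path query; only the call sequence is shown.
 * The call fails unless the cm_id is ESTABLISHED with lap_state UNINIT or
 * IDLE.
 */
static int example_load_alt_path(struct ib_cm_id *cm_id,
				 struct sa_path_rec *alt_path)
{
	return ib_send_cm_lap(cm_id, alt_path, NULL, 0);
}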
3034 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3035 struct sa_path_rec *path)
3039 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3040 sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
3041 sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
3043 lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
3044 sa_path_set_dlid(path, cpu_to_be32(lid));
3046 lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
3047 sa_path_set_slid(path, cpu_to_be32(lid));
3051 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3052 struct sa_path_rec *path,
3053 struct cm_lap_msg *lap_msg)
3055 path->dgid = lap_msg->alt_local_gid;
3056 path->sgid = lap_msg->alt_remote_gid;
3057 path->flow_label = cm_lap_get_flow_label(lap_msg);
3058 path->hop_limit = lap_msg->alt_hop_limit;
3059 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
3060 path->reversible = 1;
3061 path->pkey = cm_id_priv->pkey;
3062 path->sl = cm_lap_get_sl(lap_msg);
3063 path->mtu_selector = IB_SA_EQ;
3064 path->mtu = cm_id_priv->path_mtu;
3065 path->rate_selector = IB_SA_EQ;
3066 path->rate = cm_lap_get_packet_rate(lap_msg);
3067 path->packet_life_time_selector = IB_SA_EQ;
3068 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
3069 path->packet_life_time -= (path->packet_life_time > 0);
3070 cm_format_path_lid_from_lap(lap_msg, path);
3073 static int cm_lap_handler(struct cm_work *work)
3075 struct cm_id_private *cm_id_priv;
3076 struct cm_lap_msg *lap_msg;
3077 struct ib_cm_lap_event_param *param;
3078 struct ib_mad_send_buf *msg = NULL;
3081 /* todo: verify LAP request and send reject APR if invalid. */
3082 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3083 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
3084 lap_msg->local_comm_id);
3088 param = &work->cm_event.param.lap_rcvd;
3089 memset(&work->path[0], 0, sizeof(work->path[0]));
3090 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3091 work->port->port_num,
3093 &lap_msg->alt_local_gid);
3094 param->alternate_path = &work->path[0];
3095 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3096 work->cm_event.private_data = &lap_msg->private_data;
3098 spin_lock_irq(&cm_id_priv->lock);
3099 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3102 switch (cm_id_priv->id.lap_state) {
3103 case IB_CM_LAP_UNINIT:
3104 case IB_CM_LAP_IDLE:
3106 case IB_CM_MRA_LAP_SENT:
3107 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3108 counter[CM_LAP_COUNTER]);
3109 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3113 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3114 CM_MSG_RESPONSE_OTHER,
3115 cm_id_priv->service_timeout,
3116 cm_id_priv->private_data,
3117 cm_id_priv->private_data_len);
3118 spin_unlock_irq(&cm_id_priv->lock);
3120 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3121 ib_post_send_mad(msg, NULL))
3124 case IB_CM_LAP_RCVD:
3125 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3126 counter[CM_LAP_COUNTER]);
3132 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3133 cm_id_priv->tid = lap_msg->hdr.tid;
3134 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3135 work->mad_recv_wc->recv_buf.grh,
3137 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
3139 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3141 list_add_tail(&work->list, &cm_id_priv->work_list);
3142 spin_unlock_irq(&cm_id_priv->lock);
3145 cm_process_work(cm_id_priv, work);
3147 cm_deref_id(cm_id_priv);
3150 unlock: spin_unlock_irq(&cm_id_priv->lock);
3151 deref: cm_deref_id(cm_id_priv);
3155 static void cm_format_apr(struct cm_apr_msg *apr_msg,
3156 struct cm_id_private *cm_id_priv,
3157 enum ib_cm_apr_status status,
3160 const void *private_data,
3161 u8 private_data_len)
3163 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3164 apr_msg->local_comm_id = cm_id_priv->id.local_id;
3165 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3166 apr_msg->ap_status = (u8) status;
3168 if (info && info_length) {
3169 apr_msg->info_length = info_length;
3170 memcpy(apr_msg->info, info, info_length);
3173 if (private_data && private_data_len)
3174 memcpy(apr_msg->private_data, private_data, private_data_len);
3177 int ib_send_cm_apr(struct ib_cm_id *cm_id,
3178 enum ib_cm_apr_status status,
3181 const void *private_data,
3182 u8 private_data_len)
3184 struct cm_id_private *cm_id_priv;
3185 struct ib_mad_send_buf *msg;
3186 unsigned long flags;
3189 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3190 (info && info_length > IB_CM_APR_INFO_LENGTH))
3193 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3194 spin_lock_irqsave(&cm_id_priv->lock, flags);
3195 if (cm_id->state != IB_CM_ESTABLISHED ||
3196 (cm_id->lap_state != IB_CM_LAP_RCVD &&
3197 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3202 ret = cm_alloc_msg(cm_id_priv, &msg);
3206 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3207 info, info_length, private_data, private_data_len);
3208 ret = ib_post_send_mad(msg, NULL);
3210 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3215 cm_id->lap_state = IB_CM_LAP_IDLE;
3216 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3219 EXPORT_SYMBOL(ib_send_cm_apr);
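/*
 * Editor's illustration, not part of the original file: accepting a
 * received LAP.  Arming the QP with the alternate path (IB_QP_ALT_PATH
 * plus IB_MIG_REARM, as set up by cm_init_qp_rts_attr()) remains the
 * ULP's responsibility.
 */
static int example_accept_lap(struct ib_cm_id *cm_id)
{
	return ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
}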
3221 static int cm_apr_handler(struct cm_work *work)
3223 struct cm_id_private *cm_id_priv;
3224 struct cm_apr_msg *apr_msg;
3227 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3228 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3229 apr_msg->local_comm_id);
3231 return -EINVAL; /* Unmatched reply. */
3233 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3234 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3235 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3236 work->cm_event.private_data = &apr_msg->private_data;
3238 spin_lock_irq(&cm_id_priv->lock);
3239 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3240 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3241 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3242 spin_unlock_irq(&cm_id_priv->lock);
3245 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3246 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3247 cm_id_priv->msg = NULL;
3249 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3251 list_add_tail(&work->list, &cm_id_priv->work_list);
3252 spin_unlock_irq(&cm_id_priv->lock);
3255 cm_process_work(cm_id_priv, work);
3257 cm_deref_id(cm_id_priv);
3260 cm_deref_id(cm_id_priv);
3264 static int cm_timewait_handler(struct cm_work *work)
3266 struct cm_timewait_info *timewait_info;
3267 struct cm_id_private *cm_id_priv;
3270 timewait_info = (struct cm_timewait_info *)work;
3271 spin_lock_irq(&cm.lock);
3272 list_del(&timewait_info->list);
3273 spin_unlock_irq(&cm.lock);
3275 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3276 timewait_info->work.remote_id);
3280 spin_lock_irq(&cm_id_priv->lock);
3281 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3282 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3283 spin_unlock_irq(&cm_id_priv->lock);
3286 cm_id_priv->id.state = IB_CM_IDLE;
3287 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3289 list_add_tail(&work->list, &cm_id_priv->work_list);
3290 spin_unlock_irq(&cm_id_priv->lock);
3293 cm_process_work(cm_id_priv, work);
3295 cm_deref_id(cm_id_priv);
3298 cm_deref_id(cm_id_priv);
3302 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3303 struct cm_id_private *cm_id_priv,
3304 struct ib_cm_sidr_req_param *param)
3306 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3307 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
3308 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3309 sidr_req_msg->pkey = param->path->pkey;
3310 sidr_req_msg->service_id = param->service_id;
3312 if (param->private_data && param->private_data_len)
3313 memcpy(sidr_req_msg->private_data, param->private_data,
3314 param->private_data_len);
3317 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3318 struct ib_cm_sidr_req_param *param)
3320 struct cm_id_private *cm_id_priv;
3321 struct ib_mad_send_buf *msg;
3322 unsigned long flags;
3325 if (!param->path || (param->private_data &&
3326 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3329 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3330 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
3334 cm_id->service_id = param->service_id;
3335 cm_id->service_mask = ~cpu_to_be64(0);
3336 cm_id_priv->timeout_ms = param->timeout_ms;
3337 cm_id_priv->max_cm_retries = param->max_cm_retries;
3338 ret = cm_alloc_msg(cm_id_priv, &msg);
3342 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3344 msg->timeout_ms = cm_id_priv->timeout_ms;
3345 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3347 spin_lock_irqsave(&cm_id_priv->lock, flags);
3348 if (cm_id->state == IB_CM_IDLE)
3349 ret = ib_post_send_mad(msg, NULL);
3354 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3358 cm_id->state = IB_CM_SIDR_REQ_SENT;
3359 cm_id_priv->msg = msg;
3360 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3364 EXPORT_SYMBOL(ib_send_cm_sidr_req);
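/*
 * Editor's illustration, not part of the original file: resolving a
 * service ID to a remote UD QPN with SIDR.  The path record and service
 * ID are placeholders; the answer is delivered to the cm_handler as
 * IB_CM_SIDR_REP_RECEIVED.
 */
static int example_sidr_resolve(struct ib_cm_id *cm_id,
				struct sa_path_rec *path, __be64 service_id)
{
	struct ib_cm_sidr_req_param req = {
		.path		= path,
		.service_id	= service_id,
		.timeout_ms	= 2000,
		.max_cm_retries	= 3,
	};

	return ib_send_cm_sidr_req(cm_id, &req);
}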
3366 static void cm_format_sidr_req_event(struct cm_work *work,
3367 struct ib_cm_id *listen_id)
3369 struct cm_sidr_req_msg *sidr_req_msg;
3370 struct ib_cm_sidr_req_event_param *param;
3372 sidr_req_msg = (struct cm_sidr_req_msg *)
3373 work->mad_recv_wc->recv_buf.mad;
3374 param = &work->cm_event.param.sidr_req_rcvd;
3375 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3376 param->listen_id = listen_id;
3377 param->service_id = sidr_req_msg->service_id;
3378 param->bth_pkey = cm_get_bth_pkey(work);
3379 param->port = work->port->port_num;
3380 work->cm_event.private_data = &sidr_req_msg->private_data;
3383 static int cm_sidr_req_handler(struct cm_work *work)
3385 struct ib_cm_id *cm_id;
3386 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3387 struct cm_sidr_req_msg *sidr_req_msg;
3390 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3392 return PTR_ERR(cm_id);
3393 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3395 /* Record SGID/SLID and request ID for lookup. */
3396 sidr_req_msg = (struct cm_sidr_req_msg *)
3397 work->mad_recv_wc->recv_buf.mad;
3398 wc = work->mad_recv_wc->wc;
3399 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3400 cm_id_priv->av.dgid.global.interface_id = 0;
3401 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3402 work->mad_recv_wc->recv_buf.grh,
3404 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3405 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3406 atomic_inc(&cm_id_priv->work_count);
3408 spin_lock_irq(&cm.lock);
3409 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3410 if (cur_cm_id_priv) {
3411 spin_unlock_irq(&cm.lock);
3412 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3413 counter[CM_SIDR_REQ_COUNTER]);
3414 goto out; /* Duplicate message. */
3416 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3417 cur_cm_id_priv = cm_find_listen(cm_id->device,
3418 sidr_req_msg->service_id);
3419 if (!cur_cm_id_priv) {
3420 spin_unlock_irq(&cm.lock);
3421 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3422 goto out; /* No match. */
3424 atomic_inc(&cur_cm_id_priv->refcount);
3425 atomic_inc(&cm_id_priv->refcount);
3426 spin_unlock_irq(&cm.lock);
3428 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3429 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3430 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3431 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3433 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3434 cm_process_work(cm_id_priv, work);
3435 cm_deref_id(cur_cm_id_priv);
3438 ib_destroy_cm_id(&cm_id_priv->id);
3442 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3443 struct cm_id_private *cm_id_priv,
3444 struct ib_cm_sidr_rep_param *param)
3446 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3448 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3449 sidr_rep_msg->status = param->status;
3450 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3451 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3452 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3454 if (param->info && param->info_length)
3455 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3457 if (param->private_data && param->private_data_len)
3458 memcpy(sidr_rep_msg->private_data, param->private_data,
3459 param->private_data_len);
3462 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3463 struct ib_cm_sidr_rep_param *param)
3465 struct cm_id_private *cm_id_priv;
3466 struct ib_mad_send_buf *msg;
3467 unsigned long flags;
3470 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3471 (param->private_data &&
3472 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3475 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3476 spin_lock_irqsave(&cm_id_priv->lock, flags);
3477 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3482 ret = cm_alloc_msg(cm_id_priv, &msg);
3486 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3488 ret = ib_post_send_mad(msg, NULL);
3490 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3494 cm_id->state = IB_CM_IDLE;
3495 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3497 spin_lock_irqsave(&cm.lock, flags);
3498 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3499 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3500 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3502 spin_unlock_irqrestore(&cm.lock, flags);
3505 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3508 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
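/*
 * Editor's illustration, not part of the original file: a listener
 * answering a SIDR REQ with the QPN and Q_Key of the UD QP that backs the
 * service.  The QP pointer and Q_Key are hypothetical.
 */
static int example_sidr_answer(struct ib_cm_id *cm_id, struct ib_qp *ud_qp,
			       u32 qkey)
{
	struct ib_cm_sidr_rep_param rep = {
		.qp_num	= ud_qp->qp_num,
		.qkey	= qkey,
		.status	= IB_SIDR_SUCCESS,
	};

	return ib_send_cm_sidr_rep(cm_id, &rep);
}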
3510 static void cm_format_sidr_rep_event(struct cm_work *work)
3512 struct cm_sidr_rep_msg *sidr_rep_msg;
3513 struct ib_cm_sidr_rep_event_param *param;
3515 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3516 work->mad_recv_wc->recv_buf.mad;
3517 param = &work->cm_event.param.sidr_rep_rcvd;
3518 param->status = sidr_rep_msg->status;
3519 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3520 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3521 param->info = &sidr_rep_msg->info;
3522 param->info_len = sidr_rep_msg->info_length;
3523 work->cm_event.private_data = &sidr_rep_msg->private_data;
3526 static int cm_sidr_rep_handler(struct cm_work *work)
3528 struct cm_sidr_rep_msg *sidr_rep_msg;
3529 struct cm_id_private *cm_id_priv;
3531 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3532 work->mad_recv_wc->recv_buf.mad;
3533 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3535 return -EINVAL; /* Unmatched reply. */
3537 spin_lock_irq(&cm_id_priv->lock);
3538 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3539 spin_unlock_irq(&cm_id_priv->lock);
3542 cm_id_priv->id.state = IB_CM_IDLE;
3543 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3544 spin_unlock_irq(&cm_id_priv->lock);
3546 cm_format_sidr_rep_event(work);
3547 cm_process_work(cm_id_priv, work);
3550 cm_deref_id(cm_id_priv);
3554 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3555 enum ib_wc_status wc_status)
3557 struct cm_id_private *cm_id_priv;
3558 struct ib_cm_event cm_event;
3559 enum ib_cm_state state;
3562 memset(&cm_event, 0, sizeof cm_event);
3563 cm_id_priv = msg->context[0];
3565 /* Discard old sends or ones without a response. */
3566 spin_lock_irq(&cm_id_priv->lock);
3567 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3568 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3571 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3572 state, ib_wc_status_msg(wc_status));
3574 case IB_CM_REQ_SENT:
3575 case IB_CM_MRA_REQ_RCVD:
3576 cm_reset_to_idle(cm_id_priv);
3577 cm_event.event = IB_CM_REQ_ERROR;
3579 case IB_CM_REP_SENT:
3580 case IB_CM_MRA_REP_RCVD:
3581 cm_reset_to_idle(cm_id_priv);
3582 cm_event.event = IB_CM_REP_ERROR;
3584 case IB_CM_DREQ_SENT:
3585 cm_enter_timewait(cm_id_priv);
3586 cm_event.event = IB_CM_DREQ_ERROR;
3588 case IB_CM_SIDR_REQ_SENT:
3589 cm_id_priv->id.state = IB_CM_IDLE;
3590 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3595 spin_unlock_irq(&cm_id_priv->lock);
3596 cm_event.param.send_status = wc_status;
3598 /* No other events can occur on the cm_id at this point. */
3599 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3602 ib_destroy_cm_id(&cm_id_priv->id);
3605 spin_unlock_irq(&cm_id_priv->lock);
3609 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3610 struct ib_mad_send_wc *mad_send_wc)
3612 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3613 struct cm_port *port;
3616 port = mad_agent->context;
3617 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3618 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3621 * If the send was in response to a received message (context[0] is not
3622 * set to a cm_id), and is not a REJ, then it is a send that was manually retried.
3625 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3628 atomic_long_add(1 + msg->retries,
3629 &port->counter_group[CM_XMIT].counter[attr_index]);
3631 atomic_long_add(msg->retries,
3632 &port->counter_group[CM_XMIT_RETRIES].
3633 counter[attr_index]);
3635 switch (mad_send_wc->status) {
3637 case IB_WC_WR_FLUSH_ERR:
3641 if (msg->context[0] && msg->context[1])
3642 cm_process_send_error(msg, mad_send_wc->status);
3649 static void cm_work_handler(struct work_struct *_work)
3651 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3654 switch (work->cm_event.event) {
3655 case IB_CM_REQ_RECEIVED:
3656 ret = cm_req_handler(work);
3658 case IB_CM_MRA_RECEIVED:
3659 ret = cm_mra_handler(work);
3661 case IB_CM_REJ_RECEIVED:
3662 ret = cm_rej_handler(work);
3664 case IB_CM_REP_RECEIVED:
3665 ret = cm_rep_handler(work);
3667 case IB_CM_RTU_RECEIVED:
3668 ret = cm_rtu_handler(work);
3670 case IB_CM_USER_ESTABLISHED:
3671 ret = cm_establish_handler(work);
3673 case IB_CM_DREQ_RECEIVED:
3674 ret = cm_dreq_handler(work);
3676 case IB_CM_DREP_RECEIVED:
3677 ret = cm_drep_handler(work);
3679 case IB_CM_SIDR_REQ_RECEIVED:
3680 ret = cm_sidr_req_handler(work);
3682 case IB_CM_SIDR_REP_RECEIVED:
3683 ret = cm_sidr_rep_handler(work);
3685 case IB_CM_LAP_RECEIVED:
3686 ret = cm_lap_handler(work);
3688 case IB_CM_APR_RECEIVED:
3689 ret = cm_apr_handler(work);
3691 case IB_CM_TIMEWAIT_EXIT:
3692 ret = cm_timewait_handler(work);
3702 static int cm_establish(struct ib_cm_id *cm_id)
3704 struct cm_id_private *cm_id_priv;
3705 struct cm_work *work;
3706 unsigned long flags;
3708 struct cm_device *cm_dev;
3710 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3714 work = kmalloc(sizeof *work, GFP_ATOMIC);
3718 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3719 spin_lock_irqsave(&cm_id_priv->lock, flags);
3720 switch (cm_id->state)
3722 case IB_CM_REP_SENT:
3723 case IB_CM_MRA_REP_RCVD:
3724 cm_id->state = IB_CM_ESTABLISHED;
3726 case IB_CM_ESTABLISHED:
3733 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3741 * The CM worker thread may try to destroy the cm_id before it
3742 * can execute this work item. To prevent potential deadlock,
3743 * we need to find the cm_id once we're in the context of the
3744 * worker thread, rather than holding a reference on it.
3746 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3747 work->local_id = cm_id->local_id;
3748 work->remote_id = cm_id->remote_id;
3749 work->mad_recv_wc = NULL;
3750 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3752 /* Check whether the device has started its remove_one */
3753 spin_lock_irqsave(&cm.lock, flags);
3754 if (!cm_dev->going_down) {
3755 queue_delayed_work(cm.wq, &work->work, 0);
3760 spin_unlock_irqrestore(&cm.lock, flags);
3766 static int cm_migrate(struct ib_cm_id *cm_id)
3768 struct cm_id_private *cm_id_priv;
3769 struct cm_av tmp_av;
3770 unsigned long flags;
3771 int tmp_send_port_not_ready;
3774 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3775 spin_lock_irqsave(&cm_id_priv->lock, flags);
3776 if (cm_id->state == IB_CM_ESTABLISHED &&
3777 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3778 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3779 cm_id->lap_state = IB_CM_LAP_IDLE;
3780 /* Swap address vector */
3781 tmp_av = cm_id_priv->av;
3782 cm_id_priv->av = cm_id_priv->alt_av;
3783 cm_id_priv->alt_av = tmp_av;
3784 /* Swap port send ready state */
3785 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3786 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3787 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3790 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3795 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3800 case IB_EVENT_COMM_EST:
3801 ret = cm_establish(cm_id);
3803 case IB_EVENT_PATH_MIG:
3804 ret = cm_migrate(cm_id);
3811 EXPORT_SYMBOL(ib_cm_notify);
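/*
 * Editor's illustration, not part of the original file: relaying
 * asynchronous QP events to the CM.  COMM_EST fires when data arrives on
 * a QP still in RTR (before the RTU), PATH_MIG when the HCA migrates to
 * the alternate path; both must reach ib_cm_notify() to keep the CM state
 * machine in sync.  Passing the cm_id as the QP's context is hypothetical
 * wiring.
 */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_cm_id *cm_id = context;

	if (event->event == IB_EVENT_COMM_EST ||
	    event->event == IB_EVENT_PATH_MIG)
		ib_cm_notify(cm_id, event->event);
}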
3813 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3814 struct ib_mad_send_buf *send_buf,
3815 struct ib_mad_recv_wc *mad_recv_wc)
3817 struct cm_port *port = mad_agent->context;
3818 struct cm_work *work;
3819 enum ib_cm_event_type event;
3824 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3825 case CM_REQ_ATTR_ID:
3826 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3827 alt_local_lid != 0);
3828 event = IB_CM_REQ_RECEIVED;
3830 case CM_MRA_ATTR_ID:
3831 event = IB_CM_MRA_RECEIVED;
3833 case CM_REJ_ATTR_ID:
3834 event = IB_CM_REJ_RECEIVED;
3836 case CM_REP_ATTR_ID:
3837 event = IB_CM_REP_RECEIVED;
3839 case CM_RTU_ATTR_ID:
3840 event = IB_CM_RTU_RECEIVED;
3842 case CM_DREQ_ATTR_ID:
3843 event = IB_CM_DREQ_RECEIVED;
3845 case CM_DREP_ATTR_ID:
3846 event = IB_CM_DREP_RECEIVED;
3848 case CM_SIDR_REQ_ATTR_ID:
3849 event = IB_CM_SIDR_REQ_RECEIVED;
3851 case CM_SIDR_REP_ATTR_ID:
3852 event = IB_CM_SIDR_REP_RECEIVED;
3854 case CM_LAP_ATTR_ID:
3856 event = IB_CM_LAP_RECEIVED;
3858 case CM_APR_ATTR_ID:
3859 event = IB_CM_APR_RECEIVED;
3862 ib_free_recv_mad(mad_recv_wc);
3866 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3867 atomic_long_inc(&port->counter_group[CM_RECV].
3868 counter[attr_id - CM_ATTR_ID_OFFSET]);
3870 work = kmalloc(sizeof(*work) + sizeof(struct sa_path_rec) * paths,
3873 ib_free_recv_mad(mad_recv_wc);
3877 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3878 work->cm_event.event = event;
3879 work->mad_recv_wc = mad_recv_wc;
3882 /* Check whether the device has started its remove_one */
3883 spin_lock_irq(&cm.lock);
3884 if (!port->cm_dev->going_down)
3885 queue_delayed_work(cm.wq, &work->work, 0);
3888 spin_unlock_irq(&cm.lock);
3892 ib_free_recv_mad(mad_recv_wc);
3896 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3897 struct ib_qp_attr *qp_attr,
3900 unsigned long flags;
3903 spin_lock_irqsave(&cm_id_priv->lock, flags);
3904 switch (cm_id_priv->id.state) {
3905 case IB_CM_REQ_SENT:
3906 case IB_CM_MRA_REQ_RCVD:
3907 case IB_CM_REQ_RCVD:
3908 case IB_CM_MRA_REQ_SENT:
3909 case IB_CM_REP_RCVD:
3910 case IB_CM_MRA_REP_SENT:
3911 case IB_CM_REP_SENT:
3912 case IB_CM_MRA_REP_RCVD:
3913 case IB_CM_ESTABLISHED:
3914 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3915 IB_QP_PKEY_INDEX | IB_QP_PORT;
3916 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3917 if (cm_id_priv->responder_resources)
3918 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3919 IB_ACCESS_REMOTE_ATOMIC;
3920 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3921 qp_attr->port_num = cm_id_priv->av.port->port_num;
3928 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3932 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3933 struct ib_qp_attr *qp_attr,
3936 unsigned long flags;
3939 spin_lock_irqsave(&cm_id_priv->lock, flags);
3940 switch (cm_id_priv->id.state) {
3941 case IB_CM_REQ_RCVD:
3942 case IB_CM_MRA_REQ_SENT:
3943 case IB_CM_REP_RCVD:
3944 case IB_CM_MRA_REP_SENT:
3945 case IB_CM_REP_SENT:
3946 case IB_CM_MRA_REP_RCVD:
3947 case IB_CM_ESTABLISHED:
3948 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3949 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3950 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3951 qp_attr->path_mtu = cm_id_priv->path_mtu;
3952 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3953 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3954 if (cm_id_priv->qp_type == IB_QPT_RC ||
3955 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3956 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3957 IB_QP_MIN_RNR_TIMER;
3958 qp_attr->max_dest_rd_atomic =
3959 cm_id_priv->responder_resources;
3960 qp_attr->min_rnr_timer = 0;
3962 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
3963 *qp_attr_mask |= IB_QP_ALT_PATH;
3964 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3965 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3966 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3967 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3975 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3979 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3980 struct ib_qp_attr *qp_attr,
3983 unsigned long flags;
3986 spin_lock_irqsave(&cm_id_priv->lock, flags);
3987 switch (cm_id_priv->id.state) {
3988 /* Allow transition to RTS before sending REP */
3989 case IB_CM_REQ_RCVD:
3990 case IB_CM_MRA_REQ_SENT:
3992 case IB_CM_REP_RCVD:
3993 case IB_CM_MRA_REP_SENT:
3994 case IB_CM_REP_SENT:
3995 case IB_CM_MRA_REP_RCVD:
3996 case IB_CM_ESTABLISHED:
3997 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3998 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3999 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4000 switch (cm_id_priv->qp_type) {
4002 case IB_QPT_XRC_INI:
4003 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4004 IB_QP_MAX_QP_RD_ATOMIC;
4005 qp_attr->retry_cnt = cm_id_priv->retry_count;
4006 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4007 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4009 case IB_QPT_XRC_TGT:
4010 *qp_attr_mask |= IB_QP_TIMEOUT;
4011 qp_attr->timeout = cm_id_priv->av.timeout;
4016 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4017 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4018 qp_attr->path_mig_state = IB_MIG_REARM;
4021 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4022 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4023 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4024 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4025 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4026 qp_attr->path_mig_state = IB_MIG_REARM;
4034 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4038 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4039 struct ib_qp_attr *qp_attr,
4042 struct cm_id_private *cm_id_priv;
4045 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4046 switch (qp_attr->qp_state) {
4048 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4051 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4054 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4062 EXPORT_SYMBOL(ib_cm_init_qp_attr);
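/*
 * Editor's illustration, not part of the original file: driving a
 * connected QP through INIT -> RTR -> RTS with attributes supplied by the
 * CM.  ib_cm_init_qp_attr() keys off the qp_state the caller stores in
 * the attribute structure before each call.
 */
static int example_modify_qp_to_rts(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	static const enum ib_qp_state states[] = {
		IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS
	};
	struct ib_qp_attr attr = {};
	int i, mask, ret;

	for (i = 0; i < ARRAY_SIZE(states); i++) {
		attr.qp_state = states[i];
		ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
		if (ret)
			return ret;
		ret = ib_modify_qp(qp, &attr, mask);
		if (ret)
			return ret;
	}
	return 0;
}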
4064 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
4067 struct cm_counter_group *group;
4068 struct cm_counter_attribute *cm_attr;
4070 group = container_of(obj, struct cm_counter_group, obj);
4071 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
4073 return sprintf(buf, "%ld\n",
4074 atomic_long_read(&group->counter[cm_attr->index]));
4077 static const struct sysfs_ops cm_counter_ops = {
4078 .show = cm_show_counter
4081 static struct kobj_type cm_counter_obj_type = {
4082 .sysfs_ops = &cm_counter_ops,
4083 .default_attrs = cm_counter_default_attrs
4086 static void cm_release_port_obj(struct kobject *obj)
4088 struct cm_port *cm_port;
4090 cm_port = container_of(obj, struct cm_port, port_obj);
4094 static struct kobj_type cm_port_obj_type = {
4095 .release = cm_release_port_obj
4098 static char *cm_devnode(struct device *dev, umode_t *mode)
4102 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
4105 struct class cm_class = {
4106 .owner = THIS_MODULE,
4107 .name = "infiniband_cm",
4108 .devnode = cm_devnode,
4110 EXPORT_SYMBOL(cm_class);
4112 static int cm_create_port_fs(struct cm_port *port)
4116 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
4117 &port->cm_dev->device->kobj,
4118 "%d", port->port_num);
4124 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
4125 ret = kobject_init_and_add(&port->counter_group[i].obj,
4126 &cm_counter_obj_type,
4128 "%s", counter_group_names[i]);
4137 kobject_put(&port->counter_group[i].obj);
4138 kobject_put(&port->port_obj);
4143 static void cm_remove_port_fs(struct cm_port *port)
4147 for (i = 0; i < CM_COUNTER_GROUPS; i++)
4148 kobject_put(&port->counter_group[i].obj);
4150 kobject_put(&port->port_obj);
static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
			 ib_device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;
	cm_dev->device = device_create(&cm_class, &ib_device->dev,
				       MKDEV(0, 0), NULL,
				       "%s", ib_device->name);
	if (IS_ERR(cm_dev->device)) {
		kfree(cm_dev);
		return;
	}

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;
		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;
		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;
		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);
		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;
		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI, &reg_req, 0,
							cm_send_handler,
							cm_recv_handler,
							port, 0);
		if (IS_ERR(port->mad_agent))
			goto error2;
		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;
		count++;
	}
	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);
	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	/* Unwind the ports that were fully set up before the failure. */
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;
		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
free:
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the workqueue only after going_down has been set.
		 * This guarantees that the receive handler queues no new
		 * work, after which it is safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
	}

	device_unregister(cm_dev->device);
	kfree(cm_dev);
}

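/*
 * The teardown in cm_remove_one() relies on a flag-then-flush idiom:
 * going_down is set under cm.lock, the workqueue is flushed, and only
 * then is the MAD agent unregistered.  A minimal sketch of the same
 * pattern (names are illustrative, not from this file):
 *
 *	spin_lock_irq(&lock);
 *	stopping = true;		// producers test this under 'lock'
 *	spin_unlock_irq(&lock);
 *	flush_workqueue(wq);		// drain work queued before the flag
 *	// now safe to tear down whatever the work items were using
 */
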
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}
	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}
	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	idr_destroy(&cm.local_id_table);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);