/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
        [ECONNRESET]                    = "reset by remote host",
        [ECONNREFUSED]                  = "refused by remote application",
        [ETIMEDOUT]                     = "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
        size_t index;

        /* iWARP uses negative errnos */
        index = -reason;

        if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
            iwcm_rej_reason_strs[index])
                return iwcm_rej_reason_strs[index];
        else
                return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);

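/*
 * Example (illustrative sketch, not part of this module): a consumer's
 * event handler can turn a failed CONNECT_REPLY status into a readable
 * log message with iwcm_reject_msg(); "my_cm_handler" is a hypothetical
 * name.
 *
 *      static int my_cm_handler(struct iw_cm_id *cm_id,
 *                               struct iw_cm_event *event)
 *      {
 *              if (event->event == IW_CM_EVENT_CONNECT_REPLY && event->status)
 *                      pr_info("connect failed: %s\n",
 *                              iwcm_reject_msg(event->status));
 *              return 0;
 *      }
 */
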
static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
        [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
        [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
        [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
        [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
        [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
        [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
        [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
        [RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
};

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
        struct work_struct work;
        struct iwcm_id_private *cm_id;
        struct list_head list;
        struct iw_cm_event event;
        struct list_head free_list;
};

static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
        {
                .procname       = "default_backlog",
                .data           = &default_backlog,
                .maxlen         = sizeof(default_backlog),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}

static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, free the cm_id and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
                return 1;
        }

        return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        (void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);

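/*
 * Example (illustrative sketch, not part of this module): a consumer
 * such as the RDMA CM allocates a cm_id with its own handler and
 * context; the "my_" names below are hypothetical.
 *
 *      struct iw_cm_id *cm_id;
 *
 *      cm_id = iw_create_cm_id(device, my_cm_handler, my_context);
 *      if (IS_ERR(cm_id))
 *              return PTR_ERR(cm_id);
 */
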
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;

                /* QP could be <nul> for user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * connect_request event delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);

                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

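/*
 * Example (illustrative sketch, not part of this module): a consumer
 * closing an established connection gracefully passes abrupt == 0 so
 * the QP moves to SQD; an error path could pass abrupt == 1 to force
 * the QP into ERR instead.
 *
 *      ret = iw_cm_disconnect(cm_id, 0);
 */
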
/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall. A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        /*
         * Since we're deleting the cm_id, drop any events that
         * might arrive before the last dereference.
         */
        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                cm_id->device->ops.iw_destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving connection request event notification or
                 * returned non zero from the event callback function.
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_id->device->ops.iw_reject(cm_id, NULL, 0);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (cm_id->mapped) {
                iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
                iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
        }

        (void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/**
 * iw_cm_check_wildcard - If IP address is 0 then use original
 * @pm_addr: sockaddr containing the ip to check for wildcard
 * @cm_addr: sockaddr containing the actual IP address
 * @cm_outaddr: sockaddr to set the IP address in, leaving the port unchanged
 *
 * Checks the pm_addr for wildcard and then sets cm_outaddr's
 * IP to the actual (cm_addr).
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
                                 struct sockaddr_storage *cm_addr,
                                 struct sockaddr_storage *cm_outaddr)
{
        if (pm_addr->ss_family == AF_INET) {
                struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;

                if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
                        struct sockaddr_in *cm4_addr =
                                (struct sockaddr_in *)cm_addr;
                        struct sockaddr_in *cm4_outaddr =
                                (struct sockaddr_in *)cm_outaddr;

                        cm4_outaddr->sin_addr = cm4_addr->sin_addr;
                }
        } else {
                struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;

                if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
                        struct sockaddr_in6 *cm6_addr =
                                (struct sockaddr_in6 *)cm_addr;
                        struct sockaddr_in6 *cm6_outaddr =
                                (struct sockaddr_in6 *)cm_outaddr;

                        cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
                }
        }
}

/**
 * iw_cm_map - Use portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: Indicates the active side when true
 * returns nonzero for error only if iwpm_create_mapinfo() fails
 *
 * Tries to add a mapping for a port using the Portmapper. If
 * successful in mapping the IP/Port it will check the remote
 * mapped IP address for a wildcard IP address and replace the
 * zero IP address with the remote_addr.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
        const char *devname = dev_name(&cm_id->device->dev);
        const char *ifname = cm_id->device->iw_ifname;
        struct iwpm_dev_data pm_reg_msg = {};
        struct iwpm_sa_data pm_msg;
        int status;

        if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
            strlen(ifname) >= sizeof(pm_reg_msg.if_name))
                return -EINVAL;

        cm_id->m_local_addr = cm_id->local_addr;
        cm_id->m_remote_addr = cm_id->remote_addr;

        strcpy(pm_reg_msg.dev_name, devname);
        strcpy(pm_reg_msg.if_name, ifname);

        if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
            !iwpm_valid_pid())
                return 0;

        cm_id->mapped = true;
        pm_msg.loc_addr = cm_id->local_addr;
        pm_msg.rem_addr = cm_id->remote_addr;
        pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
                       IWPM_FLAGS_NO_PORT_MAP : 0;
        if (active)
                status = iwpm_add_and_query_mapping(&pm_msg,
                                                    RDMA_NL_IWCM);
        else
                status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

        if (!status) {
                cm_id->m_local_addr = pm_msg.mapped_loc_addr;
                if (active) {
                        cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
                        iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
                                             &cm_id->remote_addr,
                                             &cm_id->m_remote_addr);
                }
        }

        return iwpm_create_mapinfo(&cm_id->local_addr,
                                   &cm_id->m_local_addr,
                                   RDMA_NL_IWCM, pm_msg.flags);
}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        if (!backlog)
                backlog = default_backlog;

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = iw_cm_map(cm_id, false);
                if (!ret)
                        ret = cm_id->device->ops.iw_create_listen(cm_id,
                                                                  backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

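/*
 * Example (illustrative sketch, not part of this module): a passive
 * side consumer fills in cm_id->local_addr before listening; a backlog
 * of 0 selects the "default_backlog" sysctl value above. "my_addr" is
 * a hypothetical, already-initialized struct sockaddr_in.
 *
 *      memcpy(&cm_id->local_addr, &my_addr, sizeof(my_addr));
 *      ret = iw_cm_listen(cm_id, 0);
 */
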
/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->ops.iw_reject(cm_id, private_data,
                                           private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->ops.iw_add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->ops.iw_rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

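/*
 * Example (illustrative sketch, not part of this module): called from
 * the consumer's handler for IW_CM_EVENT_CONNECT_REQUEST after a QP
 * has been created; iw_param.qpn is what ties the cm_id to the QP via
 * ops.iw_get_qp() above. "my_qp" is hypothetical.
 *
 *      struct iw_cm_conn_param param = {
 *              .ord = 1,
 *              .ird = 1,
 *              .qpn = my_qp->qp_num,
 *      };
 *
 *      ret = iw_cm_accept(cm_id, &param);
 */
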
/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                ret = -EINVAL;
                goto err;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                ret = -EINVAL;
                goto err;
        }
        cm_id->device->ops.iw_add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = iw_cm_map(cm_id, true);
        if (!ret)
                ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
        if (!ret)
                return 0;       /* success */

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->qp) {
                cm_id->device->ops.iw_rem_ref(qp);
                cm_id_priv->qp = NULL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
err:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);
        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

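/*
 * Example (illustrative sketch, not part of this module): the active
 * side fills in the cm_id addresses and a connection parameter block
 * before connecting; the "my_" names are hypothetical.
 *
 *      struct iw_cm_conn_param param = {
 *              .ord = 1,
 *              .ird = 1,
 *              .qpn = my_qp->qp_num,
 *              .private_data = &my_pdata,
 *              .private_data_len = sizeof(my_pdata),
 *      };
 *
 *      ret = iw_cm_connect(cm_id, &param);
 */
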
/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->m_local_addr = iw_event->local_addr;
        cm_id->m_remote_addr = iw_event->remote_addr;
        cm_id->local_addr = listen_id_priv->id.local_addr;

        ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
                                   &iw_event->remote_addr,
                                   &cm_id->remote_addr,
                                   RDMA_NL_IWCM);
        if (ret) {
                cm_id->remote_addr = iw_event->remote_addr;
        } else {
                iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
                                     &iw_event->local_addr,
                                     &cm_id->local_addr);
                iw_event->local_addr = cm_id->local_addr;
                iw_event->remote_addr = cm_id->remote_addr;
        }

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        /*
         * We could be destroying the listening id. If so, ignore this
         * connection request.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * frame mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == 0) {
                cm_id_priv->id.m_local_addr = iw_event->local_addr;
                cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
                iw_event->local_addr = cm_id_priv->id.local_addr;
                iw_event->remote_addr = cm_id_priv->id.remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->qp) {
                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }

        return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
                        ret = process_event(cm_id_priv, &levent);
                        if (ret)
                                destroy_cm_id(&cm_id_priv->id);
                } else
                        pr_debug("dropping event %d\n", levent.event);
                if (iwcm_deref_id(cm_id_priv))
                        return;
                if (empty)
                        return;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called on interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block.  Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *            0 - the event was handled.
 * -ENOMEM      - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE|
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

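/*
 * Example (illustrative sketch, not part of this module): a consumer
 * asks the CM for the attributes it requires for a transition, then
 * applies them with ib_modify_qp(); "my_qp" is hypothetical.
 *
 *      struct ib_qp_attr attr = { .qp_state = IB_QPS_RTS };
 *      int mask;
 *
 *      ret = iw_cm_init_qp_attr(cm_id, &attr, &mask);
 *      if (!ret)
 *              ret = ib_modify_qp(my_qp, &attr, mask);
 */
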
static int __init iw_cm_init(void)
{
        int ret;

        ret = iwpm_init(RDMA_NL_IWCM);
        if (ret)
                pr_err("iw_cm: couldn't init iwpm\n");
        else
                rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
        iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
        if (!iwcm_wq)
                return -ENOMEM;

        iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
                                                 iwcm_ctl_table);
        if (!iwcm_ctl_table_hdr) {
                pr_err("iw_cm: couldn't register sysctl paths\n");
                destroy_workqueue(iwcm_wq);
                return -ENOMEM;
        }

        return 0;
}

static void __exit iw_cm_cleanup(void)
{
        unregister_net_sysctl_table(iwcm_ctl_table_hdr);
        destroy_workqueue(iwcm_wq);
        rdma_nl_unregister(RDMA_NL_IWCM);
        iwpm_exit(RDMA_NL_IWCM);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);