1 // SPDX-License-Identifier: GPL-2.0
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
5 * Basic Transport Functions exploiting InfiniBand API
7 * Copyright IBM Corp. 2016
12 #include <linux/socket.h>
13 #include <linux/if_vlan.h>
14 #include <linux/random.h>
15 #include <linux/workqueue.h>
16 #include <linux/wait.h>
17 #include <linux/reboot.h>
20 #include <rdma/ib_verbs.h>
21 #include <rdma/ib_cache.h>
30 #include "smc_close.h"
33 #define SMC_LGR_NUM_INCR 256
34 #define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
35 #define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
36 #define SMC_LGR_FREE_DELAY_FAST (8 * HZ)
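/* With HZ being the kernel tick rate (ticks per second), these delays
 * correspond to roughly 600 s (server), 610 s (client) and 8 s (fast path)
 * before the delayed free work for an idle link group runs.
 */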
38 static struct smc_lgr_list smc_lgr_list = { /* established link groups */
39 .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
40 .list = LIST_HEAD_INIT(smc_lgr_list.list),
44 static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
45 static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
47 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
48 struct smc_buf_desc *buf_desc);
50 /* return head of link group list and its lock for a given link group */
51 static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
52 spinlock_t **lgr_lock)
55 *lgr_lock = &lgr->smcd->lgr_lock;
56 return &lgr->smcd->lgr_list;
59 *lgr_lock = &smc_lgr_list.lock;
60 return &smc_lgr_list.list;
63 static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
65 /* client link group creation always follows the server link group
66 * creation. For client use a somewhat higher removal delay time,
67 * otherwise there is a risk of out-of-sync link groups.
69 if (!lgr->freeing && !lgr->freefast) {
70 mod_delayed_work(system_wq, &lgr->free_work,
71 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
72 SMC_LGR_FREE_DELAY_CLNT :
73 SMC_LGR_FREE_DELAY_SERV);
77 void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
79 if (!lgr->freeing && !lgr->freefast) {
81 mod_delayed_work(system_wq, &lgr->free_work,
82 SMC_LGR_FREE_DELAY_FAST);
86 /* Register connection's alert token in our lookup structure.
87 * To use rbtrees we have to implement our own insert core.
88 * Requires @conns_lock
89 * @smc connection to register
90 * Returns 0 on success, != 0 otherwise.
92 static void smc_lgr_add_alert_token(struct smc_connection *conn)
94 struct rb_node **link, *parent = NULL;
95 u32 token = conn->alert_token_local;
97 link = &conn->lgr->conns_all.rb_node;
99 struct smc_connection *cur = rb_entry(*link,
100 struct smc_connection, alert_node);
103 if (cur->alert_token_local > token)
104 link = &parent->rb_left;
106 link = &parent->rb_right;
108 /* Put the new node there */
109 rb_link_node(&conn->alert_node, parent, link);
110 rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
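/* A minimal sketch of the matching lookup for the insert above, assuming the
 * same key ordering on alert_token_local; the real helper,
 * smc_lgr_find_conn(), is only referenced in this file and defined elsewhere.
 */
#if 0 /* illustrative sketch only */
static struct smc_connection *smc_lgr_find_conn_sketch(u32 token,
						struct smc_link_group *lgr)
{
	struct rb_node *node = lgr->conns_all.rb_node;

	while (node) {
		struct smc_connection *cur = rb_entry(node,
					struct smc_connection, alert_node);

		if (cur->alert_token_local > token)
			node = node->rb_left;
		else if (cur->alert_token_local < token)
			node = node->rb_right;
		else
			return cur;	/* token found */
	}
	return NULL;			/* token not registered */
}
#endif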
113 /* Register connection in link group by assigning an alert token
114 * registered in a search tree.
115 * Requires @conns_lock
116 * Note that '0' is a reserved value and not assigned.
118 static void smc_lgr_register_conn(struct smc_connection *conn)
120 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
121 static atomic_t nexttoken = ATOMIC_INIT(0);
123 /* find a new alert_token_local value not yet used by some connection
126 sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
127 while (!conn->alert_token_local) {
128 conn->alert_token_local = atomic_inc_return(&nexttoken);
129 if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
130 conn->alert_token_local = 0;
132 smc_lgr_add_alert_token(conn);
133 conn->lgr->conns_num++;
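/* Note on the loop above: the token counter simply wraps; a wrapped value of
 * 0 keeps the while() condition true, and a value already in use is reset to
 * 0 by the smc_lgr_find_conn() check, so both cases are retried. This is how
 * the reserved value '0' is never handed out.
 */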
136 /* Unregister connection and reset the alert token of the given connection
138 static void __smc_lgr_unregister_conn(struct smc_connection *conn)
140 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
141 struct smc_link_group *lgr = conn->lgr;
143 rb_erase(&conn->alert_node, &lgr->conns_all);
145 conn->alert_token_local = 0;
146 sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
149 /* Unregister connection from lgr
151 static void smc_lgr_unregister_conn(struct smc_connection *conn)
153 struct smc_link_group *lgr = conn->lgr;
157 write_lock_bh(&lgr->conns_lock);
158 if (conn->alert_token_local) {
159 __smc_lgr_unregister_conn(conn);
161 write_unlock_bh(&lgr->conns_lock);
165 /* Send delete link, either as client to request the initiation
166 * of the DELETE LINK sequence from server; or as server to
167 * initiate the delete processing. See smc_llc_rx_delete_link().
169 static int smc_link_send_delete(struct smc_link *lnk, bool orderly)
171 if (lnk->state == SMC_LNK_ACTIVE &&
172 !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
173 smc_llc_link_deleting(lnk);
179 static void smc_lgr_free(struct smc_link_group *lgr);
181 static void smc_lgr_free_work(struct work_struct *work)
183 struct smc_link_group *lgr = container_of(to_delayed_work(work),
184 struct smc_link_group,
186 spinlock_t *lgr_lock;
187 struct smc_link *lnk;
190 smc_lgr_list_head(lgr, &lgr_lock);
191 spin_lock_bh(lgr_lock);
193 spin_unlock_bh(lgr_lock);
196 read_lock_bh(&lgr->conns_lock);
197 conns = RB_EMPTY_ROOT(&lgr->conns_all);
198 read_unlock_bh(&lgr->conns_lock);
199 if (!conns) { /* number of lgr connections is no longer zero */
200 spin_unlock_bh(lgr_lock);
203 list_del_init(&lgr->list); /* remove from smc_lgr_list */
205 lnk = &lgr->lnk[SMC_SINGLE_LINK];
206 if (!lgr->is_smcd && !lgr->terminating) {
207 /* try to send del link msg, on error free lgr immediately */
208 if (lnk->state == SMC_LNK_ACTIVE &&
209 !smc_link_send_delete(lnk, true)) {
210 /* reschedule in case we never receive a response */
211 smc_lgr_schedule_free_work(lgr);
212 spin_unlock_bh(lgr_lock);
216 lgr->freeing = 1; /* this instance does the freeing, no new schedule */
217 spin_unlock_bh(lgr_lock);
218 cancel_delayed_work(&lgr->free_work);
220 if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
221 smc_llc_link_inactive(lnk);
222 if (lgr->is_smcd && !lgr->terminating)
223 smc_ism_signal_shutdown(lgr);
227 static void smc_lgr_terminate_work(struct work_struct *work)
229 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
232 smc_lgr_terminate(lgr, true);
235 /* create a new SMC link group */
236 static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
238 struct smc_link_group *lgr;
239 struct list_head *lgr_list;
240 struct smc_link *lnk;
241 spinlock_t *lgr_lock;
246 if (ini->is_smcd && ini->vlan_id) {
247 if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
248 rc = SMC_CLC_DECL_ISMVLANERR;
253 lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
255 rc = SMC_CLC_DECL_MEM;
258 lgr->is_smcd = ini->is_smcd;
260 lgr->terminating = 0;
263 lgr->vlan_id = ini->vlan_id;
264 rwlock_init(&lgr->sndbufs_lock);
265 rwlock_init(&lgr->rmbs_lock);
266 rwlock_init(&lgr->conns_lock);
267 for (i = 0; i < SMC_RMBE_SIZES; i++) {
268 INIT_LIST_HEAD(&lgr->sndbufs[i]);
269 INIT_LIST_HEAD(&lgr->rmbs[i]);
271 smc_lgr_list.num += SMC_LGR_NUM_INCR;
272 memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
273 INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
274 INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
275 lgr->conns_all = RB_ROOT;
277 /* SMC-D specific settings */
278 get_device(&ini->ism_dev->dev);
279 lgr->peer_gid = ini->ism_gid;
280 lgr->smcd = ini->ism_dev;
281 lgr_list = &ini->ism_dev->lgr_list;
282 lgr_lock = &lgr->smcd->lgr_lock;
283 lgr->peer_shutdown = 0;
284 atomic_inc(&ini->ism_dev->lgr_cnt);
286 /* SMC-R specific settings */
287 get_device(&ini->ib_dev->ibdev->dev);
288 lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
289 memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
292 lnk = &lgr->lnk[SMC_SINGLE_LINK];
293 /* initialize link */
294 lnk->state = SMC_LNK_ACTIVATING;
295 lnk->link_id = SMC_SINGLE_LINK;
296 lnk->smcibdev = ini->ib_dev;
297 lnk->ibport = ini->ib_port;
298 lgr_list = &smc_lgr_list.list;
299 lgr_lock = &smc_lgr_list.lock;
301 ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
302 if (!ini->ib_dev->initialized)
303 smc_ib_setup_per_ibdev(ini->ib_dev);
304 get_random_bytes(rndvec, sizeof(rndvec));
305 lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
307 rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
308 ini->vlan_id, lnk->gid,
312 rc = smc_llc_link_init(lnk);
315 rc = smc_wr_alloc_link_mem(lnk);
318 rc = smc_ib_create_protection_domain(lnk);
321 rc = smc_ib_create_queue_pair(lnk);
324 rc = smc_wr_create_link(lnk);
327 atomic_inc(&lgr_cnt);
328 atomic_inc(&ini->ib_dev->lnk_cnt);
331 spin_lock_bh(lgr_lock);
332 list_add(&lgr->list, lgr_list);
333 spin_unlock_bh(lgr_lock);
337 smc_ib_destroy_queue_pair(lnk);
339 smc_ib_dealloc_protection_domain(lnk);
341 smc_wr_free_link_mem(lnk);
343 smc_llc_link_clear(lnk);
347 if (ini->is_smcd && ini->vlan_id)
348 smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
352 rc = SMC_CLC_DECL_MEM;
354 rc = SMC_CLC_DECL_INTERR;
359 static void smc_buf_unuse(struct smc_connection *conn,
360 struct smc_link_group *lgr)
362 if (conn->sndbuf_desc)
363 conn->sndbuf_desc->used = 0;
364 if (conn->rmb_desc) {
365 if (!conn->rmb_desc->regerr) {
366 if (!lgr->is_smcd && !list_empty(&lgr->list)) {
367 /* unregister rmb with peer */
368 smc_llc_do_delete_rkey(
369 &lgr->lnk[SMC_SINGLE_LINK],
372 conn->rmb_desc->used = 0;
374 /* buf registration failed, reuse not possible */
375 write_lock_bh(&lgr->rmbs_lock);
376 list_del(&conn->rmb_desc->list);
377 write_unlock_bh(&lgr->rmbs_lock);
379 smc_buf_free(lgr, true, conn->rmb_desc);
384 /* remove a finished connection from its link group */
385 void smc_conn_free(struct smc_connection *conn)
387 struct smc_link_group *lgr = conn->lgr;
392 if (!list_empty(&lgr->list))
393 smc_ism_unset_conn(conn);
394 tasklet_kill(&conn->rx_tsklet);
396 smc_cdc_tx_dismiss_slots(conn);
398 if (!list_empty(&lgr->list)) {
399 smc_lgr_unregister_conn(conn);
400 smc_buf_unuse(conn, lgr); /* allow buffer reuse */
404 smc_lgr_schedule_free_work(lgr);
407 static void smc_link_clear(struct smc_link *lnk)
410 smc_llc_link_clear(lnk);
411 smc_ib_modify_qp_reset(lnk);
412 smc_wr_free_link(lnk);
413 smc_ib_destroy_queue_pair(lnk);
414 smc_ib_dealloc_protection_domain(lnk);
415 smc_wr_free_link_mem(lnk);
416 if (!atomic_dec_return(&lnk->smcibdev->lnk_cnt))
417 wake_up(&lnk->smcibdev->lnks_deleted);
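/* The teardown above broadly reverses the per-link setup done in
 * smc_lgr_create(): LLC state, the queue pair, the protection domain and
 * finally the work-request memory of the link are released.
 */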
420 static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
421 struct smc_buf_desc *buf_desc)
423 struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
426 if (buf_desc->mr_rx[SMC_SINGLE_LINK])
427 smc_ib_put_memory_region(
428 buf_desc->mr_rx[SMC_SINGLE_LINK]);
429 smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
432 smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
435 sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
437 __free_pages(buf_desc->pages, buf_desc->order);
441 static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
442 struct smc_buf_desc *buf_desc)
445 /* restore original buf len */
446 buf_desc->len += sizeof(struct smcd_cdc_msg);
447 smc_ism_unregister_dmb(lgr->smcd, buf_desc);
449 kfree(buf_desc->cpu_addr);
454 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
455 struct smc_buf_desc *buf_desc)
458 smcd_buf_free(lgr, is_rmb, buf_desc);
460 smcr_buf_free(lgr, is_rmb, buf_desc);
463 static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
465 struct smc_buf_desc *buf_desc, *bf_desc;
466 struct list_head *buf_list;
469 for (i = 0; i < SMC_RMBE_SIZES; i++) {
471 buf_list = &lgr->rmbs[i];
473 buf_list = &lgr->sndbufs[i];
474 list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
476 list_del(&buf_desc->list);
477 smc_buf_free(lgr, is_rmb, buf_desc);
482 static void smc_lgr_free_bufs(struct smc_link_group *lgr)
484 /* free send buffers */
485 __smc_lgr_free_bufs(lgr, false);
487 __smc_lgr_free_bufs(lgr, true);
490 /* remove a link group */
491 static void smc_lgr_free(struct smc_link_group *lgr)
493 smc_lgr_free_bufs(lgr);
495 if (!lgr->terminating) {
496 smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
497 put_device(&lgr->smcd->dev);
499 if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
500 wake_up(&lgr->smcd->lgrs_deleted);
502 smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
503 put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
504 if (!atomic_dec_return(&lgr_cnt))
505 wake_up(&lgrs_deleted);
510 void smc_lgr_forget(struct smc_link_group *lgr)
512 struct list_head *lgr_list;
513 spinlock_t *lgr_lock;
515 lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
516 spin_lock_bh(lgr_lock);
517 /* do not use this link group for new connections */
518 if (!list_empty(lgr_list))
519 list_del_init(lgr_list);
520 spin_unlock_bh(lgr_lock);
523 static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
527 for (i = 0; i < SMC_RMBE_SIZES; i++) {
528 struct smc_buf_desc *buf_desc;
530 list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
531 buf_desc->len += sizeof(struct smcd_cdc_msg);
532 smc_ism_unregister_dmb(lgr->smcd, buf_desc);
537 static void smc_sk_wake_ups(struct smc_sock *smc)
539 smc->sk.sk_write_space(&smc->sk);
540 smc->sk.sk_data_ready(&smc->sk);
541 smc->sk.sk_state_change(&smc->sk);
544 /* kill a connection */
545 static void smc_conn_kill(struct smc_connection *conn, bool soft)
547 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
549 if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
550 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
552 smc_close_abort(conn);
554 smc->sk.sk_err = ECONNABORTED;
555 smc_sk_wake_ups(smc);
556 if (conn->lgr->is_smcd) {
557 smc_ism_unset_conn(conn);
559 tasklet_kill(&conn->rx_tsklet);
561 tasklet_unlock_wait(&conn->rx_tsklet);
563 smc_cdc_tx_dismiss_slots(conn);
565 smc_lgr_unregister_conn(conn);
566 smc_close_active_abort(smc);
569 static void smc_lgr_cleanup(struct smc_link_group *lgr)
572 smc_ism_signal_shutdown(lgr);
573 smcd_unregister_all_dmbs(lgr);
574 smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
575 put_device(&lgr->smcd->dev);
577 struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
579 wake_up(&lnk->wr_reg_wait);
580 if (lnk->state != SMC_LNK_INACTIVE) {
581 smc_link_send_delete(lnk, false);
582 smc_llc_link_inactive(lnk);
587 /* terminate link group */
588 static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
590 struct smc_connection *conn;
591 struct smc_sock *smc;
592 struct rb_node *node;
594 if (lgr->terminating)
595 return; /* lgr already terminating */
597 cancel_delayed_work_sync(&lgr->free_work);
598 lgr->terminating = 1;
600 smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
602 /* kill remaining link group connections */
603 read_lock_bh(&lgr->conns_lock);
604 node = rb_first(&lgr->conns_all);
606 read_unlock_bh(&lgr->conns_lock);
607 conn = rb_entry(node, struct smc_connection, alert_node);
608 smc = container_of(conn, struct smc_sock, conn);
609 sock_hold(&smc->sk); /* sock_put below */
611 smc_conn_kill(conn, soft);
612 release_sock(&smc->sk);
613 sock_put(&smc->sk); /* sock_hold above */
614 read_lock_bh(&lgr->conns_lock);
615 node = rb_first(&lgr->conns_all);
617 read_unlock_bh(&lgr->conns_lock);
618 smc_lgr_cleanup(lgr);
620 smc_lgr_schedule_free_work_fast(lgr);
625 /* unlink and terminate link group
626 * @soft: true if link group shutdown can take its time
627 * false if immediate link group shutdown is required
629 void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
631 spinlock_t *lgr_lock;
633 smc_lgr_list_head(lgr, &lgr_lock);
634 spin_lock_bh(lgr_lock);
635 if (lgr->terminating) {
636 spin_unlock_bh(lgr_lock);
637 return; /* lgr already terminating */
641 list_del_init(&lgr->list);
642 spin_unlock_bh(lgr_lock);
643 __smc_lgr_terminate(lgr, soft);
646 /* Called when IB port is terminated */
647 void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
649 struct smc_link_group *lgr, *l;
650 LIST_HEAD(lgr_free_list);
652 spin_lock_bh(&smc_lgr_list.lock);
653 list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
655 lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
656 lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
657 list_move(&lgr->list, &lgr_free_list);
661 spin_unlock_bh(&smc_lgr_list.lock);
663 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
664 list_del_init(&lgr->list);
665 __smc_lgr_terminate(lgr, false);
669 /* Called when peer lgr shutdown (regularly or abnormally) is received */
670 void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
672 struct smc_link_group *lgr, *l;
673 LIST_HEAD(lgr_free_list);
675 /* run common cleanup function and build free list */
676 spin_lock_bh(&dev->lgr_lock);
677 list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
678 if ((!peer_gid || lgr->peer_gid == peer_gid) &&
679 (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
680 if (peer_gid) /* peer triggered termination */
681 lgr->peer_shutdown = 1;
682 list_move(&lgr->list, &lgr_free_list);
685 spin_unlock_bh(&dev->lgr_lock);
687 /* cancel the regular free workers and actually free lgrs */
688 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
689 list_del_init(&lgr->list);
690 schedule_work(&lgr->terminate_work);
694 /* Called when an SMCD device is removed or the smc module is unloaded */
695 void smc_smcd_terminate_all(struct smcd_dev *smcd)
697 struct smc_link_group *lgr, *lg;
698 LIST_HEAD(lgr_free_list);
700 spin_lock_bh(&smcd->lgr_lock);
701 list_splice_init(&smcd->lgr_list, &lgr_free_list);
702 list_for_each_entry(lgr, &lgr_free_list, list)
704 spin_unlock_bh(&smcd->lgr_lock);
706 list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
707 list_del_init(&lgr->list);
708 __smc_lgr_terminate(lgr, false);
711 if (atomic_read(&smcd->lgr_cnt))
712 wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
715 /* Called when an SMCR device is removed or the smc module is unloaded.
716 * If smcibdev is given, all SMCR link groups using this device are terminated.
717 * If smcibdev is NULL, all SMCR link groups are terminated.
719 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
721 struct smc_link_group *lgr, *lg;
722 LIST_HEAD(lgr_free_list);
724 spin_lock_bh(&smc_lgr_list.lock);
726 list_splice_init(&smc_lgr_list.list, &lgr_free_list);
727 list_for_each_entry(lgr, &lgr_free_list, list)
730 list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
731 if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
732 list_move(&lgr->list, &lgr_free_list);
737 spin_unlock_bh(&smc_lgr_list.lock);
739 list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
740 list_del_init(&lgr->list);
741 __smc_lgr_terminate(lgr, false);
745 if (atomic_read(&smcibdev->lnk_cnt))
746 wait_event(smcibdev->lnks_deleted,
747 !atomic_read(&smcibdev->lnk_cnt));
749 if (atomic_read(&lgr_cnt))
750 wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
754 /* Determine vlan of internal TCP socket.
755 * @ini: init info; the determined vlan id is stored into ini->vlan_id
757 int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
759 struct dst_entry *dst = sk_dst_get(clcsock->sk);
760 struct net_device *ndev;
761 int i, nest_lvl, rc = 0;
774 if (is_vlan_dev(ndev)) {
775 ini->vlan_id = vlan_dev_vlan_id(ndev);
780 nest_lvl = ndev->lower_level;
781 for (i = 0; i < nest_lvl; i++) {
782 struct list_head *lower = &ndev->adj_list.lower;
784 if (list_empty(lower))
787 ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
788 if (is_vlan_dev(ndev)) {
789 ini->vlan_id = vlan_dev_vlan_id(ndev);
801 static bool smcr_lgr_match(struct smc_link_group *lgr,
802 struct smc_clc_msg_local *lcl,
803 enum smc_lgr_role role, u32 clcqpn)
805 return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
807 !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
809 !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
812 (lgr->role == SMC_SERV ||
813 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
816 static bool smcd_lgr_match(struct smc_link_group *lgr,
817 struct smcd_dev *smcismdev, u64 peer_gid)
819 return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
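/* For reuse, an SMC-R link group must match the peer system id, gid and mac
 * from the peer's CLC data (and, on the client side, the peer QP number),
 * while an SMC-D link group matches on peer gid and ISM device; the vlan id
 * and the per-group connection limit are checked by the caller, see
 * smc_conn_create() below.
 */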
822 /* create a new SMC connection (and a new link group if necessary) */
823 int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
825 struct smc_connection *conn = &smc->conn;
826 struct list_head *lgr_list;
827 struct smc_link_group *lgr;
828 enum smc_lgr_role role;
829 spinlock_t *lgr_lock;
832 lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
833 lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
834 ini->cln_first_contact = SMC_FIRST_CONTACT;
835 role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
836 if (role == SMC_CLNT && ini->srv_first_contact)
837 /* create new link group as well */
840 /* determine if an existing link group can be reused */
841 spin_lock_bh(lgr_lock);
842 list_for_each_entry(lgr, lgr_list, list) {
843 write_lock_bh(&lgr->conns_lock);
845 smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
846 smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
848 lgr->vlan_id == ini->vlan_id &&
850 lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
851 /* link group found */
852 ini->cln_first_contact = SMC_REUSE_CONTACT;
854 smc_lgr_register_conn(conn); /* add smc conn to lgr */
855 if (delayed_work_pending(&lgr->free_work))
856 cancel_delayed_work(&lgr->free_work);
857 write_unlock_bh(&lgr->conns_lock);
860 write_unlock_bh(&lgr->conns_lock);
862 spin_unlock_bh(lgr_lock);
864 if (role == SMC_CLNT && !ini->srv_first_contact &&
865 ini->cln_first_contact == SMC_FIRST_CONTACT) {
866 /* Server reuses a link group, but Client wants to start a new one:
868 * send out_of_sync decline, reason synchr. error
870 return SMC_CLC_DECL_SYNCERR;
874 if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
875 rc = smc_lgr_create(smc, ini);
879 write_lock_bh(&lgr->conns_lock);
880 smc_lgr_register_conn(conn); /* add smc conn to lgr */
881 write_unlock_bh(&lgr->conns_lock);
883 conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
884 conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
885 conn->urg_state = SMC_URG_READ;
887 conn->rx_off = sizeof(struct smcd_cdc_msg);
888 smcd_cdc_rx_init(conn); /* init tasklet for this conn */
890 #ifndef KERNEL_HAS_ATOMIC64
891 spin_lock_init(&conn->acurs_lock);
898 /* convert the RMB size into the compressed notation - minimum 16K.
899 * In contrast to plain ilog2, this rounds towards the next power of 2,
900 * so the socket application gets at least its desired sndbuf / rcvbuf size.
902 static u8 smc_compress_bufsize(int size)
906 if (size <= SMC_BUF_MIN_SIZE)
909 size = (size - 1) >> 14;
910 compressed = ilog2(size) + 1;
911 if (compressed >= SMC_RMBE_SIZES)
912 compressed = SMC_RMBE_SIZES - 1;
916 /* convert the RMB size from compressed notation into integer */
917 int smc_uncompress_bufsize(u8 compressed)
921 size = 0x00000001 << (((int)compressed) + 14);
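/* Worked example for the two helpers above (assuming SMC_BUF_MIN_SIZE is
 * 16KB): a requested size of 20000 gives (20000 - 1) >> 14 = 1, so
 * compressed = ilog2(1) + 1 = 1, and smc_uncompress_bufsize(1) yields
 * 1 << (1 + 14) = 32768 - the next power of two at or above the request.
 */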
925 /* try to reuse a sndbuf or rmb description slot for a certain
926 * buffer size; if not available, return NULL
928 static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
930 struct list_head *buf_list)
932 struct smc_buf_desc *buf_slot;
935 list_for_each_entry(buf_slot, buf_list, list) {
936 if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
937 read_unlock_bh(lock);
941 read_unlock_bh(lock);
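/* The cmpxchg() above atomically claims a free slot under the read lock; the
 * matching release is the plain reset of buf_desc->used in smc_buf_unuse(),
 * so a descriptor can be handed to a later connection of the same size class
 * without reallocating it.
 */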
945 /* one of the conditions for announcing a receiver's current window size is
946 * that it "results in a minimum increase in the window size of 10% of the
947 * receive buffer space" [RFC7609]
949 static inline int smc_rmb_wnd_update_limit(int rmbe_size)
951 return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
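/* Example: for a 64KB RMB the limit above evaluates to
 * min(65536 / 10, SOCK_MIN_SNDBUF / 2); per the RFC7609 rule quoted above,
 * a window update is only worth announcing once the free space has grown by
 * at least that amount.
 */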
954 static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
955 bool is_rmb, int bufsize)
957 struct smc_buf_desc *buf_desc;
958 struct smc_link *lnk;
961 /* try to alloc a new buffer */
962 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
964 return ERR_PTR(-ENOMEM);
966 buf_desc->order = get_order(bufsize);
967 buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
968 __GFP_NOMEMALLOC | __GFP_COMP |
969 __GFP_NORETRY | __GFP_ZERO,
971 if (!buf_desc->pages) {
973 return ERR_PTR(-EAGAIN);
975 buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
977 /* build the sg table from the pages */
978 lnk = &lgr->lnk[SMC_SINGLE_LINK];
979 rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
982 smc_buf_free(lgr, is_rmb, buf_desc);
985 sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
986 buf_desc->cpu_addr, bufsize);
988 /* map sg table to DMA address */
989 rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
990 is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
991 /* SMC protocol depends on mapping to one DMA address only */
993 smc_buf_free(lgr, is_rmb, buf_desc);
994 return ERR_PTR(-EAGAIN);
997 /* create a new memory region for the RMB */
999 rc = smc_ib_get_memory_region(lnk->roce_pd,
1000 IB_ACCESS_REMOTE_WRITE |
1001 IB_ACCESS_LOCAL_WRITE,
1004 smc_buf_free(lgr, is_rmb, buf_desc);
1009 buf_desc->len = bufsize;
1013 #define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
1015 static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
1016 bool is_dmb, int bufsize)
1018 struct smc_buf_desc *buf_desc;
1021 if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
1022 return ERR_PTR(-EAGAIN);
1024 /* try to alloc a new DMB */
1025 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
1027 return ERR_PTR(-ENOMEM);
1029 rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
1032 return ERR_PTR(-EAGAIN);
1034 buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
1035 /* CDC header stored in buf. So, pretend it was smaller */
1036 buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
1038 buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
1039 __GFP_NOWARN | __GFP_NORETRY |
1041 if (!buf_desc->cpu_addr) {
1043 return ERR_PTR(-EAGAIN);
1045 buf_desc->len = bufsize;
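/* Only the DMB branch above reserves room for the CDC header: buf_desc->len
 * hides sizeof(struct smcd_cdc_msg) from the usable receive-buffer size,
 * matching conn->rx_off set in smc_conn_create() and the length restore in
 * smcd_buf_free(); plain send buffers keep their full size.
 */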
1050 static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
1052 struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
1053 struct smc_connection *conn = &smc->conn;
1054 struct smc_link_group *lgr = conn->lgr;
1055 struct list_head *buf_list;
1056 int bufsize, bufsize_short;
1061 /* use socket recv buffer size (w/o overhead) as start value */
1062 sk_buf_size = smc->sk.sk_rcvbuf / 2;
1064 /* use socket send buffer size (w/o overhead) as start value */
1065 sk_buf_size = smc->sk.sk_sndbuf / 2;
1067 for (bufsize_short = smc_compress_bufsize(sk_buf_size);
1068 bufsize_short >= 0; bufsize_short--) {
1071 lock = &lgr->rmbs_lock;
1072 buf_list = &lgr->rmbs[bufsize_short];
1074 lock = &lgr->sndbufs_lock;
1075 buf_list = &lgr->sndbufs[bufsize_short];
1077 bufsize = smc_uncompress_bufsize(bufsize_short);
1078 if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
1081 /* check for reusable slot in the link group */
1082 buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
1084 memset(buf_desc->cpu_addr, 0, bufsize);
1085 break; /* found reusable slot */
1089 buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
1091 buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
1093 if (PTR_ERR(buf_desc) == -ENOMEM)
1095 if (IS_ERR(buf_desc))
1099 write_lock_bh(lock);
1100 list_add(&buf_desc->list, buf_list);
1101 write_unlock_bh(lock);
1105 if (IS_ERR(buf_desc))
1109 conn->rmb_desc = buf_desc;
1110 conn->rmbe_size_short = bufsize_short;
1111 smc->sk.sk_rcvbuf = bufsize * 2;
1112 atomic_set(&conn->bytes_to_rcv, 0);
1113 conn->rmbe_update_limit =
1114 smc_rmb_wnd_update_limit(buf_desc->len);
1116 smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
1118 conn->sndbuf_desc = buf_desc;
1119 smc->sk.sk_sndbuf = bufsize * 2;
1120 atomic_set(&conn->sndbuf_space, bufsize);
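/* Worked example for the descending size search above: with sk_rcvbuf =
 * 256KB the start value is 128KB, so bufsize_short =
 * smc_compress_bufsize(131072) = 3 and bufsize = 128KB; when neither a
 * reusable slot nor a new buffer of that size can be obtained (other than by
 * running out of memory, which ends the search), the loop retries with 64KB,
 * 32KB and finally 16KB.
 */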
1125 void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
1127 struct smc_link_group *lgr = conn->lgr;
1129 if (!conn->lgr || conn->lgr->is_smcd)
1131 smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
1132 conn->sndbuf_desc, DMA_TO_DEVICE);
1135 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
1137 struct smc_link_group *lgr = conn->lgr;
1139 if (!conn->lgr || conn->lgr->is_smcd)
1141 smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
1142 conn->sndbuf_desc, DMA_TO_DEVICE);
1145 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
1147 struct smc_link_group *lgr = conn->lgr;
1149 if (!conn->lgr || conn->lgr->is_smcd)
1151 smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
1152 conn->rmb_desc, DMA_FROM_DEVICE);
1155 void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
1157 struct smc_link_group *lgr = conn->lgr;
1159 if (!conn->lgr || conn->lgr->is_smcd)
1161 smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
1162 conn->rmb_desc, DMA_FROM_DEVICE);
1165 /* create the send and receive buffer for an SMC socket;
1166 * receive buffers are called RMBs;
1167 * (even though the SMC protocol allows more than one RMB-element per RMB,
1168 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
1169 * extra RMB for every connection in a link group
1171 int smc_buf_create(struct smc_sock *smc, bool is_smcd)
1175 /* create send buffer */
1176 rc = __smc_buf_create(smc, is_smcd, false);
1180 rc = __smc_buf_create(smc, is_smcd, true);
1182 smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
1186 static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
1190 for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
1191 if (!test_and_set_bit(i, lgr->rtokens_used_mask))
1197 /* add a new rtoken from peer */
1198 int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
1200 u64 dma_addr = be64_to_cpu(nw_vaddr);
1201 u32 rkey = ntohl(nw_rkey);
1204 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
1205 if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
1206 (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
1207 test_bit(i, lgr->rtokens_used_mask)) {
1208 /* already in list */
1212 i = smc_rmb_reserve_rtoken_idx(lgr);
1215 lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
1216 lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
1220 /* delete an rtoken */
1221 int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
1223 u32 rkey = ntohl(nw_rkey);
1226 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
1227 if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
1228 test_bit(i, lgr->rtokens_used_mask)) {
1229 lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
1230 lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;
1232 clear_bit(i, lgr->rtokens_used_mask);
1239 /* save rkey and dma_addr received from peer during clc handshake */
1240 int smc_rmb_rtoken_handling(struct smc_connection *conn,
1241 struct smc_clc_msg_accept_confirm *clc)
1243 conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
1245 if (conn->rtoken_idx < 0)
1246 return conn->rtoken_idx;
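/* A minimal sketch of how a saved rtoken could be consumed when posting an
 * RDMA write into the peer's RMB (assumption: the hypothetical helper name
 * and the offset parameter are illustrative; the actual transmit path lives
 * in the tx code, not in this file).
 */
#if 0 /* illustrative sketch only */
static void smc_fill_rdma_dest_sketch(struct smc_connection *conn,
				      struct ib_rdma_wr *rdma_wr, u64 offset)
{
	struct smc_link_group *lgr = conn->lgr;

	/* remote address = peer RMB start + offset within the RMB */
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		offset;
	/* rkey authorizing access to the peer-side memory region */
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
}
#endif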
1250 static void smc_core_going_away(void)
1252 struct smc_ib_device *smcibdev;
1253 struct smcd_dev *smcd;
1255 spin_lock(&smc_ib_devices.lock);
1256 list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
1259 for (i = 0; i < SMC_MAX_PORTS; i++)
1260 set_bit(i, smcibdev->ports_going_away);
1262 spin_unlock(&smc_ib_devices.lock);
1264 spin_lock(&smcd_dev_list.lock);
1265 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
1266 smcd->going_away = 1;
1268 spin_unlock(&smcd_dev_list.lock);
1271 /* Clean up all SMC link groups */
1272 static void smc_lgrs_shutdown(void)
1274 struct smcd_dev *smcd;
1276 smc_core_going_away();
1278 smc_smcr_terminate_all(NULL);
1280 spin_lock(&smcd_dev_list.lock);
1281 list_for_each_entry(smcd, &smcd_dev_list.list, list)
1282 smc_smcd_terminate_all(smcd);
1283 spin_unlock(&smcd_dev_list.lock);
1286 static int smc_core_reboot_event(struct notifier_block *this,
1287 unsigned long event, void *ptr)
1289 smc_lgrs_shutdown();
1290 smc_ib_unregister_client();
1294 static struct notifier_block smc_reboot_notifier = {
1295 .notifier_call = smc_core_reboot_event,
1298 int __init smc_core_init(void)
1300 return register_reboot_notifier(&smc_reboot_notifier);
1303 /* Called (from smc_exit) when module is removed */
1304 void smc_core_exit(void)
1306 unregister_reboot_notifier(&smc_reboot_notifier);
1307 smc_lgrs_shutdown();