/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef CONFIG_SECURITY_INFINIBAND

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"

static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
        list_for_each_entry(tmp_pkey,
                            &dev->port_pkey_list[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
        return pkey;
}

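/* Look up the cached PKey value and subnet prefix for the port and PKey
 * index recorded in *pp; these are the inputs to the LSM access check.
 */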
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
                                      u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}

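/* Check the QP's security context, and those of all QPs sharing it,
 * against the given PKey on the given subnet.  Returns 0 when every
 * context is allowed access, or the first LSM error otherwise.
 */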
static int enforce_qp_pkey_security(u16 pkey,
                                    u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec,
                            &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix,
                                              pkey);
                if (ret)
                        return ret;
        }
        return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps is for new settings, and in
 * others the pps is for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
        }

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}

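/* Called on a PKey cache change: re-check every QP listed under this
 * PKey index and collect the ones that fail the LSM check on a local
 * to_error_list.  Those QPs are transitioned to the error state outside
 * the qp_list_lock, because qp_to_error() acquires the QP security
 * mutex and can sleep.
 */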
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, e.g. after
         * a destroy has failed.
         */
        pp->state = IB_PORT_PKEY_VALID;
}

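/* Free the LSM security blob, the ports_pkeys settings, and the
 * ib_qp_security structure itself.
 */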
static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
                if (!qp_pps) {
                        new_pps->main.port_num = qp_attr->port_num;
                        new_pps->main.pkey_index = qp_attr->pkey_index;
                } else {
                        new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
                                                  qp_attr->port_num :
                                                  qp_pps->main.port_num;

                        new_pps->main.pkey_index =
                                        (qp_attr_mask & IB_QP_PKEY_INDEX) ?
                                         qp_attr->pkey_index :
                                         qp_pps->main.pkey_index;
                }
                new_pps->main.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->main.port_num = qp_pps->main.port_num;
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
                if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->main.state = IB_PORT_PKEY_VALID;
        }

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}

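/* Create a security context for a shared QP handle and verify it against
 * the real QP's current port/PKey settings.  On success the new context
 * is linked into the real QP's shared_qp_list so later checks and error
 * transitions cover this handle as well.
 */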
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);

        if (ret)
                return ret;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);

        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}

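/* Allocate and initialize the security context for a new QP.  Devices
 * with no IB ports skip this entirely (qp->qp_sec stays NULL), which is
 * why the other entry points tolerate a NULL security structure.
 */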
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        u8 i = rdma_start_port(dev);
        bool is_ib = false;
        int ret;

        while (i <= rdma_end_port(dev) && !is_ib)
                is_ib = rdma_protocol_ib(dev, i++);

        /* If this isn't an IB device don't create the security context */
        if (!is_ib)
                return 0;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

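/*
 * QP destruction is split into three phases so a concurrent PKey cache
 * change walking the port PKey lists never operates on a QP whose
 * destruction is in flight: _begin unlists the settings and records how
 * many error-flow completions to wait for, _abort restores the settings
 * if the driver's destroy failed, and _end waits out the error flow and
 * frees the context.  A sketch of the intended caller flow (hypothetical
 * pseudo-caller; the real call sites live in the core QP destroy path):
 *
 *      ib_destroy_qp_security_begin(sec);
 *      err = qp->device->destroy_qp(qp);
 *      if (err)
 *              ib_destroy_qp_security_abort(sec);  // QP lives on
 *      else
 *              ib_destroy_qp_security_end(sec);    // QP is gone
 */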
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        /* Return if not IB */
        if (!sec)
                return;

        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to-error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these settings were listed already
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to-error flow before destroying it because
         * the to_error_list is in use.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}

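/* Entry point for PKey table / subnet prefix cache updates: re-validate
 * every QP registered on this port against the new cached values.
 */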
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}

void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                spin_lock(&device->port_pkey_list[i].list_lock);
                list_for_each_entry_safe(pkey,
                                         tmp_pkey,
                                         &device->port_pkey_list[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
                spin_unlock(&device->port_pkey_list[i].list_lock);
        }
}

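/* Wrapper around the driver's modify_qp that enforces the LSM policy
 * whenever the port, PKey index, or alternate path changes.  The new
 * settings are listed and checked before the hardware is modified, and
 * whichever ports_pkeys structure loses (the new one on failure, the
 * old one on success) is unlisted and freed afterwards.
 */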
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
                   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
                   !real_qp->qp_sec),
                  "%s: QP security is not initialized for IB QP: %d\n",
                  __func__, real_qp->qp_num);

        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP, all the shared QPs are
         * checked as well.
         */

        if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);

                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);

                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->modify_qp(real_qp,
                                                 qp_attr,
                                                 qp_attr_mask,
                                                 udata);

        if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }
                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }
        return ret;
}
EXPORT_SYMBOL(ib_security_modify_qp);

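/* Check whether the given LSM security context may access the PKey at
 * pkey_index on port_num.  Non-IB ports always pass, mirroring the
 * IB-only policy used throughout this file.
 */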
int ib_security_pkey_access(struct ib_device *dev,
                            u8 port_num,
                            u16 pkey_index,
                            void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        if (!rdma_protocol_ib(dev, port_num))
                return 0;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(ib_security_pkey_access);

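/* LSM policy-change notifier: re-evaluate whether this MAD agent is
 * still allowed to manage the subnet, caching the verdict in
 * ag->smp_allowed so the MAD hot path can check it cheaply.
 */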
static int ib_mad_agent_security_change(struct notifier_block *nb,
                                        unsigned long event,
                                        void *data)
{
        struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
                                                             ag->device->name,
                                                             ag->port_num);

        return NOTIFY_OK;
}

int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return 0;

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        ret = security_ib_endport_manage_subnet(agent->security,
                                                agent->device->name,
                                                agent->port_num);
        if (ret)
                goto free_security;

        agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
        ret = register_lsm_notifier(&agent->lsm_nb);
        if (ret)
                goto free_security;

        agent->smp_allowed = true;
        agent->lsm_nb_reg = true;
        return 0;

free_security:
        /* The security blob allocated above must be released on the
         * failure paths; returning without freeing it leaks it, since
         * the caller never invokes ib_mad_agent_security_cleanup() for
         * an agent whose setup failed.
         */
        security_ib_free_security(agent->security);
        return ret;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return;

        /* Unregister the notifier before freeing the security blob;
         * in the opposite order a policy change firing in the window
         * between the two would dereference freed memory.
         */
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);

        security_ib_free_security(agent->security);
}

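/* Per-MAD enforcement: SMPs require the cached smp_allowed verdict, and
 * every MAD is subject to the PKey access check for the agent's port.
 */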
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                return 0;

        if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
                return -EACCES;

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}

#endif /* CONFIG_SECURITY_INFINIBAND */