/*
 * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef CONFIG_SECURITY_INFINIBAND

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

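/* Find the pkey_index_qp_list tracking pp's pkey index on pp's port.
 * Returns NULL if no QP has been listed under that pkey index yet.
 */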
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
        list_for_each_entry(tmp_pkey,
                            &dev->port_pkey_list[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
        return pkey;
}

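/* Fetch the pkey value and subnet prefix for pp's port and pkey index
 * from the core's cached copies.
 */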
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
                                      u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}

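/* Ask the LSM whether the QP's security context, and the contexts of
 * every QP sharing it, may use the given pkey on the given subnet.
 * Returns 0 if all checks pass, or the first denial otherwise.
 */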
static int enforce_qp_pkey_security(u16 pkey,
                                    u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec,
                            &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix,
                                              pkey);
                if (ret)
                        return ret;
        }
        return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure will be for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
        }

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}

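/* Re-validate all QPs listed under this pkey index after a cache
 * change.  QPs that fail the LSM check are collected on a local
 * to_error_list (QPs already queued for an error transition are
 * skipped) and then moved to the error state under their security
 * mutex, completing error_complete for any QP mid-destroy.
 */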
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, e.g. after
         * a failed destroy attempt.
         */
        pp->state = IB_PORT_PKEY_VALID;
}

static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}

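/* Build a new ib_ports_pkeys reflecting qp_attr and qp_attr_mask,
 * inheriting any fields the mask doesn't touch from the QP's current
 * settings.  Returns NULL on allocation failure.
 */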
/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
                if (!qp_pps) {
                        new_pps->main.port_num = qp_attr->port_num;
                        new_pps->main.pkey_index = qp_attr->pkey_index;
                } else {
                        new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
                                                  qp_attr->port_num :
                                                  qp_pps->main.port_num;

                        new_pps->main.pkey_index =
                                        (qp_attr_mask & IB_QP_PKEY_INDEX) ?
                                         qp_attr->pkey_index :
                                         qp_pps->main.pkey_index;
                }
                new_pps->main.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->main.port_num = qp_pps->main.port_num;
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
                if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->main.state = IB_PORT_PKEY_VALID;
        }

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}

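/* Set up security for a new handle opened on a shareable QP (e.g. via
 * ib_open_qp).  The new handle's security context is checked against
 * the real QP's current port/pkey settings and, on success, linked
 * into the real QP's shared_qp_list.
 */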
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);

        if (ret)
                return ret;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);

        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}

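/* Allocate and initialize the security structure for a new QP.  Only
 * InfiniBand needs enforcement, so this is a no-op (qp->qp_sec stays
 * NULL) unless at least one port of the device runs the IB protocol.
 */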
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        u8 i = rdma_start_port(dev);
        bool is_ib = false;
        int ret;

        while (i <= rdma_end_port(dev) && !is_ib)
                is_ib = rdma_protocol_ib(dev, i++);

        /* If this isn't an IB device don't create the security context */
        if (!is_ib)
                return 0;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

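/* First phase of the destroy sequence (paired with
 * ib_destroy_qp_security_abort() and ib_destroy_qp_security_end()):
 * delist the port/pkey settings and mark the structure as destroying
 * so a concurrent cache-change scan won't touch the QP pointer, then
 * snapshot error_list_count so the later phases know how many
 * error-flow completions to wait for.
 */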
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        /* Return if not IB */
        if (!sec)
                return;

        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to-error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these settings were already listed and
         * then removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists, so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the to-error flow before destroying it because
         * the to_error_list is in use.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}

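/* Called from the cache update path when a port's pkey table or
 * subnet prefix may have changed; re-checks every QP listed under
 * each pkey index on the port.
 */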
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}

void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                spin_lock(&device->port_pkey_list[i].list_lock);
                list_for_each_entry_safe(pkey,
                                         tmp_pkey,
                                         &device->port_pkey_list[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
                spin_unlock(&device->port_pkey_list[i].list_lock);
        }
}

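/* Security wrapper around the device's modify_qp method.  For normal
 * QPs whose port/pkey settings may change, build the candidate
 * settings, list them, and check them with the LSM before performing
 * the modify; afterwards either commit the new settings or roll them
 * back depending on the result.  Special QPs (SMI/GSI and reserved
 * types) bypass the check.
 */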
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
                   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
                   !real_qp->qp_sec),
                   "%s: QP security is not initialized for IB QP: %d\n",
                   __func__, real_qp->qp_num);

        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */

        if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);

                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);

                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->modify_qp(real_qp,
                                                 qp_attr,
                                                 qp_attr_mask,
                                                 udata);

        if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }
                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }
        return ret;
}
EXPORT_SYMBOL(ib_security_modify_qp);

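/* Check whether the LSM context in sec may access the pkey stored at
 * pkey_index on port_num.  Always allowed on non-IB ports.
 */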
int ib_security_pkey_access(struct ib_device *dev,
                            u8 port_num,
                            u16 pkey_index,
                            void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        if (!rdma_protocol_ib(dev, port_num))
                return 0;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(ib_security_pkey_access);

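/* LSM notifier callback: on a policy change, re-evaluate whether the
 * MAD agent is still allowed to manage the subnet and cache the
 * result in smp_allowed.
 */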
static int ib_mad_agent_security_change(struct notifier_block *nb,
                                        unsigned long event,
                                        void *data)
{
        struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
                                                             ag->device->name,
                                                             ag->port_num);

        return NOTIFY_OK;
}

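/* Allocate an LSM security context for a new MAD agent.  SMI agents
 * are additionally checked for permission to manage the subnet and
 * registered for LSM policy-change notifications so smp_allowed can
 * track the current policy.
 */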
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return 0;

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        ret = security_ib_endport_manage_subnet(agent->security,
                                                agent->device->name,
                                                agent->port_num);
        if (ret)
                return ret;

        agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
        ret = register_lsm_notifier(&agent->lsm_nb);
        if (ret)
                return ret;

        agent->smp_allowed = true;
        agent->lsm_nb_reg = true;
        return 0;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return;

        security_ib_free_security(agent->security);
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);
}

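/* Per-MAD security check: reject SMPs when the agent has lost subnet
 * management permission, then enforce pkey access for the index the
 * MAD will use.
 */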
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                return 0;

        if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
                return -EACCES;

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}

#endif /* CONFIG_SECURITY_INFINIBAND */