/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};
struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};
union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;
enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};
enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};
enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};
struct ib_gid_table_entry {
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};
struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find whether this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Delete requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by locking this mutex from multiple
         * writers. We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are locked by this lock.
         */
        struct mutex         lock;
        /* This lock protects the table entries from being
         * read and written simultaneously.
         */
        rwlock_t             rwlock;
        struct ib_gid_table_entry *data_vec;
};
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
}
static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]                = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};
const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        /* Ignore a trailing newline, e.g. from a sysfs write. */
        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
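
/*
 * Example: given the gid_type_str table above,
 * ib_cache_gid_parse_type_str("RoCE v2") and
 * ib_cache_gid_parse_type_str("RoCE v2\n") both return
 * IB_GID_TYPE_ROCE_UDP_ENCAP, while an unknown string such as
 * "RoCE v3" returns -EINVAL.
 */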
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool  default_gid)
        __releases(&table->rwlock) __acquires(&table->rwlock)
{
        int ret = 0;
        struct net_device *old_net_dev;
        enum ib_gid_type old_gid_type;

        /* When rdma_cap_roce_gid_table() holds, this function must also be
         * protected by the sleepable table->lock, since the provider
         * callbacks below may sleep.
         */

        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irq(&table->rwlock);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irq(&table->rwlock);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }

        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (default_gid) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
                if (action == GID_TABLE_WRITE_ACTION_DEL)
                        table->data_vec[ix].attr.gid_type = old_gid_type;
        }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        return ret;
}
static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}
/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (empty < 0)
                        if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
                            !memcmp(attr, &zattr, sizeof(*attr)) &&
                            !data->props)
                                empty = curr_index;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}
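
/*
 * Worked example: addrconf_ifid_eui48() derives a modified EUI-64
 * interface ID from the netdev's MAC address by inserting 0xff,0xfe in
 * the middle and flipping the universal/local bit of the first octet.
 * For a MAC of 00:11:22:33:44:55, the interface ID becomes
 * 02:11:22:ff:fe:33:44:55, so the default link-local GID is
 * fe80:0000:0000:0000:0211:22ff:fe33:4455.
 */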
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;
        int empty;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return ret;
}
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;
        int ix;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID      |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV   |
                      GID_ATTR_FIND_MASK_DEFAULT,
                      NULL);
        if (ix < 0)
                goto out_unlock;

        if (!del_gid(ib_dev, port, table, ix, false))
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return 0;
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        if (!del_gid(ib_dev, port, table, ix,
                                     !!(table->data_vec[ix].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        return 0;
}
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ib_dev->cache.ports[p].gid;
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}
static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (!rdma_is_port_valid(ib_dev, port))
                return -ENOENT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
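
/*
 * Usage sketch (hypothetical caller, not part of this file): resolve the
 * table index of a RoCE v2 GID that is known to live on a given port and
 * netdev.
 *
 *	u16 index;
 *	int ret;
 *
 *	ret = ib_find_cached_gid_by_port(ib_dev, &gid,
 *					 IB_GID_TYPE_ROCE_UDP_ENCAP,
 *					 port, ndev, &index);
 *	if (ret)
 *		return ret;	// -ENOENT if no entry matched
 */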
/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Opaque pointer handed through to @filter.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (!rdma_is_port_valid(ib_dev, port) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        continue;

                /* Copy the attributes so @filter sees a stable snapshot
                 * while the table lock is held.
                 */
                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context)) {
                        found = true;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        if (!found)
                return -ENOENT;

        if (index)
                *index = i;
        return 0;
}
static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}
static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        write_lock_irq(&table->rwlock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        /* props holds GID_TABLE_ENTRY_* flags, so test the
                         * entry flag, not the find-mask constant.
                         */
                        if (!del_gid(ib_dev, port, table, i,
                                     !!(table->data_vec[i].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
        }
        write_unlock_irq(&table->rwlock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_attr zattr_type = zattr;
        struct ib_gid_table *table;
        unsigned int gid_type;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                int ix;
                union ib_gid current_gid;
                struct ib_gid_attr current_gid_attr = {};

                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                mutex_lock(&table->lock);
                write_lock_irq(&table->rwlock);
                ix = find_gid(table, NULL, &gid_attr, true,
                              GID_ATTR_FIND_MASK_GID_TYPE |
                              GID_ATTR_FIND_MASK_DEFAULT,
                              NULL);

                /* Couldn't find the default GID location */
                if (WARN_ON(ix < 0))
                        goto release;

                zattr_type.gid_type = gid_type;

                if (!__ib_cache_gid_get(ib_dev, port, ix,
                                        &current_gid, &current_gid_attr) &&
                    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
                    !memcmp(&gid, &current_gid, sizeof(gid)) &&
                    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                        goto release;

                if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
                    memcmp(&current_gid_attr, &zattr_type,
                           sizeof(current_gid_attr))) {
                        if (del_gid(ib_dev, port, table, ix, true)) {
                                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                                        ix, gid.raw);
                                goto release;
                        }
                        dispatch_gid_change_event(ib_dev, port);
                }

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                                pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                        gid.raw);
                        else
                                dispatch_gid_change_event(ib_dev, port);
                }

release:
                if (current_gid_attr.ndev)
                        dev_put(current_gid_attr.ndev);
                write_unlock_irq(&table->rwlock);
                mutex_unlock(&table->lock);
        }
}
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry =
                        &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table *table;
        int err = 0;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table = alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table);
                if (err)
                        goto rollback_table_setup;
                ib_dev->cache.ports[port].gid = table;
        }

        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;

                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
                release_gid_table(table);
        }

        return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                release_gid_table(table);
                ib_dev->cache.ports[port].gid = NULL;
        }
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
        }
}
static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        err = roce_rescan_device(ib_dev);
        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}
int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table *table;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        table = device->cache.ports[port_num - rdma_start_port(device)].gid;
        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
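
/*
 * Note for callers: when @gid_attr is non-NULL and the entry has a netdev,
 * __ib_cache_gid_get() takes a reference on gid_attr->ndev (dev_hold), so
 * the caller must drop it. A minimal sketch (hypothetical caller):
 *
 *	struct ib_gid_attr attr;
 *	union ib_gid gid;
 *
 *	if (!ib_get_cached_gid(device, port, 0, &gid, &attr) && attr.ndev)
 *		dev_put(attr.ndev);
 */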
int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8               *port_num,
                       u16              *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only RoCE GID table supports filter function */
        if (!rdma_cap_roce_gid_table(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);
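
/*
 * Filter sketch (hypothetical, not part of this file): match only the
 * entry bound to a given netdev, passed through @context. The callback
 * runs under the table lock, so it must not sleep.
 *
 *	static bool filter_by_ndev(const union ib_gid *gid,
 *				   const struct ib_gid_attr *attr,
 *				   void *context)
 *	{
 *		return attr->ndev == context;
 *	}
 *
 *	err = ib_find_gid_by_filter(device, &gid, port,
 *				    filter_by_ndev, ndev, &index);
 */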
int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else {
                                partial_ix = i;
                        }
                }

        /* No full-member match; fall back to a limited-member match. */
        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
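
/*
 * Worked example: a P_Key's low 15 bits carry the key value and bit 15
 * (0x8000) marks full membership. Given a cached table containing
 * { 0x7fff (limited), 0xffff (full) }, looking up either 0x7fff or 0xffff
 * matches both entries on the low 15 bits, and the loop above returns the
 * full-member index in preference to the limited one.
 */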
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
int ib_get_cached_port_state(struct ib_device   *device,
                             u8                  port_num,
                             enum ib_port_state *port_state)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *port_state = device->cache.ports[port_num
                - rdma_start_port(device)].port_state;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);
static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        }                         *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (!rdma_is_port_valid(device, port))
                return;

        table = device->cache.ports[port - rdma_start_port(device)].gid;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                                    sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
                                pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
                                        ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.ports[port -
                rdma_start_port(device)].pkey;

        device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
        if (!use_roce_gid_table) {
                write_lock(&table->rwlock);
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i,
                                   gid_cache->table + i, &zattr, false);
                }
                write_unlock(&table->rwlock);
        }

        device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
        device->cache.ports[port - rdma_start_port(device)].port_state =
                tprops->state;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}
static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.ports =
                kzalloc(sizeof(*device->cache.ports) *
                        (rdma_end_port(device) - rdma_start_port(device) + 1),
                        GFP_KERNEL);
        if (!device->cache.ports) {
                err = -ENOMEM;
                goto out;
        }

        err = gid_table_setup_one(device);
        if (err)
                goto out;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
out:
        return err;
}
void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                kfree(device->cache.ports[p].pkey);

        gid_table_release_one(device);
        kfree(device->cache.ports);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}
void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}