[linux.git] drivers/infiniband/core/cache.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

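/* Per-port snapshot of the P_Key table, rebuilt by ib_cache_update(). */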
struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

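/* Deferred cache-refresh request, queued by ib_cache_event(). */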
struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

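/*
 * zgid and zattr are the all-zero GID and GID attribute; a table entry
 * that compares equal to both (with no props set) is a free slot.
 */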
union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID         = 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long       props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void               *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find out if this GID already exists.
	 * (b) Find a free slot.
	 * (c) Write the new GID.
	 *
	 * Deleting requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically, so writers
	 * serialize on this mutex. We don't need this lock for IB, as
	 * the MAD layer replaces all entries. All data_vec entries
	 * are protected by this lock.
	 */
	struct mutex         lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t             rwlock;
	struct ib_gid_table_entry *data_vec;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

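/*
 * Parse a GID type name as printed by gid_type_str[] (an optional
 * trailing newline, e.g. from a sysfs write, is ignored) and return the
 * matching enum ib_gid_type, or -EINVAL if nothing matches.
 */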
int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleepable (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* When rdma_cap_roce_gid_table() holds, this function must be
	 * protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* If the provider call failed, or this is a delete, store the
	 * all-zero GID instead.
	 */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}

	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

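/* Thin wrappers around write_gid(), one per gid_table_write_action. */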
static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

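/*
 * Look up the first entry matching @gid/@val under @mask and return its
 * index, or -1 if none matches. When @pempty is non-NULL, the index of
 * the first completely free slot is reported through it as well.
 */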
/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

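/*
 * Add @gid/@attr to @port's GID table unless an identical entry already
 * exists. Requests that would duplicate the default GID of the port's
 * own netdevice on behalf of a different netdevice fail with -EPERM;
 * a full table yields -ENOSPC.
 */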
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ix;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix,
				     !!(table->data_vec[ix].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

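/* Caller must hold table->rwlock (read or write). */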
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ib_dev->cache.ports[p].gid;
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return -ENOENT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Opaque pointer passed through to @filter.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!rdma_is_port_valid(ib_dev, port) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     !!(table->data_vec[i].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

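/*
 * Set or delete (depending on @mode) the default GID derived from
 * @ndev in the table slots reserved for the GID types enabled in
 * @gid_type_mask.
 */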
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

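/*
 * Mark the first table slots as default-GID entries, one slot per RoCE
 * GID type the port supports, before any GID is written to the table.
 */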
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table *table;
	int err = 0;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table = alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table);
		if (err)
			goto rollback_table_setup;
		ib_dev->cache.ports[port].gid = table;
	}

	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;

		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
		release_gid_table(table);
	}

	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		release_gid_table(table);
		ib_dev->cache.ports[port].gid = NULL;
	}
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
	}
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);
	if (err)
		return err;

	err = roce_rescan_device(ib_dev);
	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table *table;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only the RoCE GID table supports a filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

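/*
 * Search the cached P_Key table comparing only the low 15 bits of
 * @pkey. Full-membership entries (bit 15 set) are preferred; a
 * limited-membership match is returned only if no full match exists.
 */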
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device   *device,
			     u8                  port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*port_state = device->cache.ports[port_num
		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

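/*
 * Re-read the port attributes, the P_Key table and (for IB ports) the
 * GID table from the device, then swap the fresh snapshots into the
 * cache under cache.lock. RoCE ports maintain their GID table through
 * ib_cache_gid_add()/ib_cache_gid_del(), so their GIDs are not
 * re-queried here.
 */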
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}                         *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table       *table;
	bool                       use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (!rdma_is_port_valid(device, port))
		return;

	table = device->cache.ports[port - rdma_start_port(device)].gid;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
			    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
		rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

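/*
 * Called once per device at registration: allocate the per-port cache
 * array, set up the GID tables, prime the cache for every port, and
 * register the event handler that keeps the cache current.
 */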
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.ports =
		kzalloc(sizeof(*device->cache.ports) *
			(rdma_end_port(device) - rdma_start_port(device) + 1),
			GFP_KERNEL);
	if (!device->cache.ports) {
		err = -ENOMEM;
		goto out;
	}

	err = gid_table_setup_one(device);
	if (err)
		goto out;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
out:
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache can no
	 * longer be accessed.
	 */
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		kfree(device->cache.ports[p].pkey);

	gid_table_release_one(device);
	kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}