/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"

/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port, value range: 0 ~ MAX
 * @gid_index: gid_index, value range: 0 ~ MAX
 * Description:
 *    N ports share one GID table; entries are laid out row by row:
 *              GID[0][0], GID[1][0], ..... GID[N - 1][0],
 *              GID[0][1], GID[1][1], ..... GID[N - 1][1],
 *              And so on
 */
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
        return gid_index * hr_dev->caps.num_ports + port;
}
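
/*
 * Worked example (illustrative, hypothetical 2-port device): with
 * num_ports == 2, the rows above interleave the ports, so
 * hns_get_gid_index(hr_dev, 1, 3) == 3 * 2 + 1 == 7, i.e. the fourth
 * GID of port 1 lives at slot 7 of the shared table.
 */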

static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
        u8 phy_port;
        u32 i = 0;

        if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
                return 0;

        for (i = 0; i < ETH_ALEN; i++)
                hr_dev->dev_addr[port][i] = addr[i];

        phy_port = hr_dev->iboe.phy_port[port];
        return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
        u8 port = attr->port_num - 1;
        int ret;

        if (port >= hr_dev->caps.num_ports)
                return -EINVAL;

        ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);

        return ret;
}

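/*
 * Deleting a GID is done by writing the all-zero GID (zgid) with empty
 * attributes back into the slot, invalidating the hardware entry rather
 * than freeing it.
 */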
static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
        struct ib_gid_attr zattr = {};
        u8 port = attr->port_num - 1;
        int ret;

        if (port >= hr_dev->caps.num_ports)
                return -EINVAL;

        ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);

        return ret;
}

static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
                           unsigned long event)
{
        struct device *dev = hr_dev->dev;
        struct net_device *netdev;
        int ret = 0;

        netdev = hr_dev->iboe.netdevs[port];
        if (!netdev) {
                dev_err(dev, "Can't find netdev on port(%u)!\n", port);
                return -ENODEV;
        }

        switch (event) {
        case NETDEV_UP:
        case NETDEV_CHANGE:
        case NETDEV_REGISTER:
        case NETDEV_CHANGEADDR:
                ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
                break;
        case NETDEV_DOWN:
                /*
                 * The v1 engine can only bring all ports down together,
                 * so there is nothing to do for a single port here.
                 */
                break;
        default:
                dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
                break;
        }

        return ret;
}

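/*
 * Netdevice notifier: match the netdev that triggered the event to its
 * RoCE port by pointer comparison and let handle_en_event() react (e.g.
 * resyncing the MAC on NETDEV_CHANGEADDR). NOTIFY_DONE is always
 * returned so other notifier consumers keep running.
 */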
static int hns_roce_netdev_event(struct notifier_block *self,
                                 unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct hns_roce_ib_iboe *iboe = NULL;
        struct hns_roce_dev *hr_dev = NULL;
        u8 port = 0;
        int ret = 0;

        hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
        iboe = &hr_dev->iboe;

        for (port = 0; port < hr_dev->caps.num_ports; port++) {
                if (dev == iboe->netdevs[port]) {
                        ret = handle_en_event(hr_dev, port, event);
                        if (ret)
                                return NOTIFY_DONE;
                        break;
                }
        }

        return NOTIFY_DONE;
}

static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
        int ret;
        u8 i;

        for (i = 0; i < hr_dev->caps.num_ports; i++) {
                if (hr_dev->hw->set_mtu)
                        hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
                                            hr_dev->caps.max_mtu);
                ret = hns_roce_set_mac(hr_dev, i,
                                       hr_dev->iboe.netdevs[i]->dev_addr);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hns_roce_query_device(struct ib_device *ib_dev,
                                 struct ib_device_attr *props,
                                 struct ib_udata *uhw)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

        memset(props, 0, sizeof(*props));

        props->fw_ver = hr_dev->caps.fw_ver;
        props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
        props->max_mr_size = (u64)(~(0ULL));
        props->page_size_cap = hr_dev->caps.page_size_cap;
        props->vendor_id = hr_dev->vendor_id;
        props->vendor_part_id = hr_dev->vendor_part_id;
        props->hw_ver = hr_dev->hw_rev;
        props->max_qp = hr_dev->caps.num_qps;
        props->max_qp_wr = hr_dev->caps.max_wqes;
        props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
                                  IB_DEVICE_RC_RNR_NAK_GEN;
        props->max_send_sge = hr_dev->caps.max_sq_sg;
        props->max_recv_sge = hr_dev->caps.max_rq_sg;
        props->max_sge_rd = 1;
        props->max_cq = hr_dev->caps.num_cqs;
        props->max_cqe = hr_dev->caps.max_cqes;
        props->max_mr = hr_dev->caps.num_mtpts;
        props->max_pd = hr_dev->caps.num_pds;
        props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
        props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
        props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
                            IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys = 1;
        props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                props->max_srq = hr_dev->caps.num_srqs;
                props->max_srq_wr = hr_dev->caps.max_srq_wrs;
                props->max_srq_sge = hr_dev->caps.max_srq_sges;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
                props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
        }

        return 0;
}

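/*
 * Port attributes are synthesized from the bound netdev: active_mtu is
 * the netdev MTU clamped to the device maximum, and the port is only
 * reported ACTIVE while the netdev is running with carrier up.
 */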
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
                               struct ib_port_attr *props)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
        struct device *dev = hr_dev->dev;
        struct net_device *net_dev;
        unsigned long flags;
        enum ib_mtu mtu;
        u8 port;

        port = port_num - 1;

        /* props being zeroed by the caller, avoid zeroing it here */

        props->max_mtu = hr_dev->caps.max_mtu;
        props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                IB_PORT_VENDOR_CLASS_SUP |
                                IB_PORT_BOOT_MGMT_SUP;
        props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
        props->pkey_tbl_len = 1;
        props->active_width = IB_WIDTH_4X;
        props->active_speed = 1;

        spin_lock_irqsave(&hr_dev->iboe.lock, flags);

        net_dev = hr_dev->iboe.netdevs[port];
        if (!net_dev) {
                spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
                dev_err(dev, "Failed to find netdev for port(%u)!\n", port);
                return -EINVAL;
        }

        mtu = iboe_get_mtu(net_dev->mtu);
        props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
        props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
                               IB_PORT_ACTIVE :
                               IB_PORT_DOWN;
        props->phys_state = props->state == IB_PORT_ACTIVE ?
                                    IB_PORT_PHYS_STATE_LINK_UP :
                                    IB_PORT_PHYS_STATE_DISABLED;

        spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

        return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
                                                    u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
                               u16 *pkey)
{
        *pkey = PKEY_ID;

        return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
                                  struct ib_device_modify *props)
{
        unsigned long flags;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
                memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
                spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
        }

        return 0;
}

static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
                                   struct ib_udata *udata)
{
        int ret;
        struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
        struct hns_roce_ib_alloc_ucontext_resp resp = {};
        struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);

        if (!hr_dev->active)
                return -EAGAIN;

        resp.qp_tab_size = hr_dev->caps.num_qps;

        ret = hns_roce_uar_alloc(hr_dev, &context->uar);
        if (ret)
                goto error_fail_uar_alloc;

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
                INIT_LIST_HEAD(&context->page_list);
                mutex_init(&context->page_mutex);
        }

        ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (ret)
                goto error_fail_copy_to_udata;

        return 0;

error_fail_copy_to_udata:
        hns_roce_uar_free(hr_dev, &context->uar);

error_fail_uar_alloc:
        return ret;
}

static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

        hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
}

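/*
 * mmap offsets understood by this driver: vm_pgoff 0 maps the
 * ucontext's UAR doorbell page (non-cached), vm_pgoff 1 maps the TPTR
 * area when the hardware provides one; anything else is rejected.
 */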
static int hns_roce_mmap(struct ib_ucontext *context,
                         struct vm_area_struct *vma)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(context->device);

        switch (vma->vm_pgoff) {
        case 0:
                return rdma_user_mmap_io(context, vma,
                                         to_hr_ucontext(context)->uar.pfn,
                                         PAGE_SIZE,
                                         pgprot_noncached(vma->vm_page_prot),
                                         NULL);

        /* vm_pgoff: 1 -- TPTR */
        case 1:
                if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
                        return -EINVAL;
                /*
                 * FIXME: using io_remap_pfn_range on the dma address returned
                 * by dma_alloc_coherent is totally wrong.
                 */
                return rdma_user_mmap_io(context, vma,
                                         hr_dev->tptr_dma_addr >> PAGE_SHIFT,
                                         hr_dev->tptr_size,
                                         vma->vm_page_prot,
                                         NULL);

        default:
                return -EINVAL;
        }
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
                                   struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int ret;

        ret = ib_query_port(ib_dev, port_num, &attr);
        if (ret)
                return ret;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;

        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
        if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
                immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

        return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

        hr_dev->active = false;
        unregister_netdevice_notifier(&iboe->nb);
        ib_unregister_device(&hr_dev->ib_dev);
}

static const struct ib_device_ops hns_roce_dev_ops = {
        .owner = THIS_MODULE,
        .driver_id = RDMA_DRIVER_HNS,
        .uverbs_abi_ver = 1,
        .uverbs_no_driver_id_binding = 1,

        .add_gid = hns_roce_add_gid,
        .alloc_pd = hns_roce_alloc_pd,
        .alloc_ucontext = hns_roce_alloc_ucontext,
        .create_ah = hns_roce_create_ah,
        .create_cq = hns_roce_create_cq,
        .create_qp = hns_roce_create_qp,
        .dealloc_pd = hns_roce_dealloc_pd,
        .dealloc_ucontext = hns_roce_dealloc_ucontext,
        .del_gid = hns_roce_del_gid,
        .dereg_mr = hns_roce_dereg_mr,
        .destroy_ah = hns_roce_destroy_ah,
        .destroy_cq = hns_roce_destroy_cq,
        .disassociate_ucontext = hns_roce_disassociate_ucontext,
        .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
        .get_dma_mr = hns_roce_get_dma_mr,
        .get_link_layer = hns_roce_get_link_layer,
        .get_port_immutable = hns_roce_port_immutable,
        .mmap = hns_roce_mmap,
        .modify_device = hns_roce_modify_device,
        .modify_qp = hns_roce_modify_qp,
        .query_ah = hns_roce_query_ah,
        .query_device = hns_roce_query_device,
        .query_pkey = hns_roce_query_pkey,
        .query_port = hns_roce_query_port,
        .reg_user_mr = hns_roce_reg_user_mr,

        INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
        INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

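/*
 * The optional op tables below are merged on top of the base
 * hns_roce_dev_ops with ib_set_device_ops(), but only when the matching
 * HNS_ROCE_CAP_FLAG_* bit is set in hr_dev->caps.flags (see
 * hns_roce_register_device()).
 */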
static const struct ib_device_ops hns_roce_dev_mr_ops = {
        .rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
        .alloc_mw = hns_roce_alloc_mw,
        .dealloc_mw = hns_roce_dealloc_mw,
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
        .alloc_mr = hns_roce_alloc_mr,
        .map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
        .create_srq = hns_roce_create_srq,
        .destroy_srq = hns_roce_destroy_srq,

        INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
        int ret;
        struct hns_roce_ib_iboe *iboe = NULL;
        struct ib_device *ib_dev = NULL;
        struct device *dev = hr_dev->dev;
        unsigned int i;

        iboe = &hr_dev->iboe;
        spin_lock_init(&iboe->lock);

        ib_dev = &hr_dev->ib_dev;

        ib_dev->node_type = RDMA_NODE_IB_CA;
        ib_dev->dev.parent = dev;

        ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
        ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
        ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
        ib_dev->uverbs_cmd_mask =
                (1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
                (1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
                (1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
                (1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
                (1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
                (1ULL << IB_USER_VERBS_CMD_REG_MR) |
                (1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
                (1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
                (1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
                (1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
                (1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
                (1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
                (1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

        ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
                ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
                ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
        }

        /* MW */
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
                ib_dev->uverbs_cmd_mask |=
                                        (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
                                        (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
                ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
        }

        /* FRMR */
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
                ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

        /* SRQ */
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                ib_dev->uverbs_cmd_mask |=
                                (1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
                                (1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
                                (1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
                                (1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
                                (1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
                ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
                ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
        }

        ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
        ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
        for (i = 0; i < hr_dev->caps.num_ports; i++) {
                if (!hr_dev->iboe.netdevs[i])
                        continue;

                ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
                                           i + 1);
                if (ret)
                        return ret;
        }
        ret = ib_register_device(ib_dev, "hns_%d");
        if (ret) {
                dev_err(dev, "ib_register_device failed!\n");
                return ret;
        }

        ret = hns_roce_setup_mtu_mac(hr_dev);
        if (ret) {
                dev_err(dev, "setup_mtu_mac failed!\n");
                goto error_failed_setup_mtu_mac;
        }

        iboe->nb.notifier_call = hns_roce_netdev_event;
        ret = register_netdevice_notifier(&iboe->nb);
        if (ret) {
                dev_err(dev, "register_netdevice_notifier failed!\n");
                goto error_failed_setup_mtu_mac;
        }

        hr_dev->active = true;
        return 0;

error_failed_setup_mtu_mac:
        ib_unregister_device(ib_dev);

        return ret;
}

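/*
 * HEM (Hardware Entry Memory) tables are created in dependency order:
 * MTPT, QPC, IRRL, then the optional TRRL/CQC/SRQC/SCCC/timer tables.
 * On failure the goto ladder tears down, in reverse order, exactly the
 * tables that were already initialized.
 */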
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
        int ret;
        struct device *dev = hr_dev->dev;

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
                                      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
                                      hr_dev->caps.num_mtpts, 1);
        if (ret) {
                dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
                return ret;
        }

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
                                      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
                                      hr_dev->caps.num_qps, 1);
        if (ret) {
                dev_err(dev, "Failed to init QP context memory, aborting.\n");
                goto err_unmap_dmpt;
        }

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
                                      HEM_TYPE_IRRL,
                                      hr_dev->caps.irrl_entry_sz *
                                      hr_dev->caps.max_qp_init_rdma,
                                      hr_dev->caps.num_qps, 1);
        if (ret) {
                dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
                goto err_unmap_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev,
                                              &hr_dev->qp_table.trrl_table,
                                              HEM_TYPE_TRRL,
                                              hr_dev->caps.trrl_entry_sz *
                                              hr_dev->caps.max_qp_dest_rdma,
                                              hr_dev->caps.num_qps, 1);
                if (ret) {
                        dev_err(dev,
                                "Failed to init trrl_table memory, aborting.\n");
                        goto err_unmap_irrl;
                }
        }

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
                                      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
                                      hr_dev->caps.num_cqs, 1);
        if (ret) {
                dev_err(dev, "Failed to init CQ context memory, aborting.\n");
                goto err_unmap_trrl;
        }

        if (hr_dev->caps.srqc_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
                                              HEM_TYPE_SRQC,
                                              hr_dev->caps.srqc_entry_sz,
                                              hr_dev->caps.num_srqs, 1);
                if (ret) {
                        dev_err(dev,
                                "Failed to init SRQ context memory, aborting.\n");
                        goto err_unmap_cq;
                }
        }

        if (hr_dev->caps.sccc_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev,
                                              &hr_dev->qp_table.sccc_table,
                                              HEM_TYPE_SCCC,
                                              hr_dev->caps.sccc_entry_sz,
                                              hr_dev->caps.num_qps, 1);
                if (ret) {
                        dev_err(dev,
                                "Failed to init SCC context memory, aborting.\n");
                        goto err_unmap_srq;
                }
        }

        if (hr_dev->caps.qpc_timer_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
                                              HEM_TYPE_QPC_TIMER,
                                              hr_dev->caps.qpc_timer_entry_sz,
                                              hr_dev->caps.num_qpc_timer, 1);
                if (ret) {
                        dev_err(dev,
                                "Failed to init QPC timer memory, aborting.\n");
                        goto err_unmap_ctx;
                }
        }

        if (hr_dev->caps.cqc_timer_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
                                              HEM_TYPE_CQC_TIMER,
                                              hr_dev->caps.cqc_timer_entry_sz,
                                              hr_dev->caps.num_cqc_timer, 1);
                if (ret) {
                        dev_err(dev,
                                "Failed to init CQC timer memory, aborting.\n");
                        goto err_unmap_qpc_timer;
                }
        }

        return 0;

err_unmap_qpc_timer:
        if (hr_dev->caps.qpc_timer_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
        if (hr_dev->caps.sccc_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev,
                                           &hr_dev->qp_table.sccc_table);
err_unmap_srq:
        if (hr_dev->caps.srqc_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev,
                                           &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

        return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return: 0 on success, a negative errno otherwise.
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
        int ret;
        struct device *dev = hr_dev->dev;

        spin_lock_init(&hr_dev->sm_lock);
        spin_lock_init(&hr_dev->bt_cmd_lock);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
                INIT_LIST_HEAD(&hr_dev->pgdir_list);
                mutex_init(&hr_dev->pgdir_mutex);
        }

        ret = hns_roce_init_uar_table(hr_dev);
        if (ret) {
                dev_err(dev, "Failed to initialize uar table, aborting.\n");
                return ret;
        }

        ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
        if (ret) {
                dev_err(dev, "Failed to allocate priv_uar.\n");
                goto err_uar_table_free;
        }

        ret = hns_roce_init_pd_table(hr_dev);
        if (ret) {
                dev_err(dev, "Failed to init protection domain table.\n");
                goto err_uar_alloc_free;
        }

        ret = hns_roce_init_mr_table(hr_dev);
        if (ret) {
                dev_err(dev, "Failed to init memory region table.\n");
                goto err_pd_table_free;
        }

        ret = hns_roce_init_cq_table(hr_dev);
        if (ret) {
                dev_err(dev, "Failed to init completion queue table.\n");
                goto err_mr_table_free;
        }

        ret = hns_roce_init_qp_table(hr_dev);
        if (ret) {
                dev_err(dev, "Failed to init queue pair table.\n");
                goto err_cq_table_free;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                ret = hns_roce_init_srq_table(hr_dev);
                if (ret) {
                        dev_err(dev,
                                "Failed to init shared receive queue table.\n");
                        goto err_qp_table_free;
                }
        }

        return 0;

err_qp_table_free:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
                hns_roce_cleanup_qp_table(hr_dev);

err_cq_table_free:
        hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
        hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
        hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
        hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
        hns_roce_cleanup_uar_table(hr_dev);
        return ret;
}

static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
        struct hns_roce_cq *hr_cq = to_hr_cq(cq);
        unsigned long flags;

        spin_lock_irqsave(&hr_cq->lock, flags);
        if (cq->comp_handler) {
                if (!hr_cq->is_armed) {
                        hr_cq->is_armed = 1;
                        list_add_tail(&hr_cq->node, cq_list);
                }
        }
        spin_unlock_irqrestore(&hr_cq->lock, flags);
}

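/*
 * On a device error, arm the send/recv CQ of every QP that still has
 * outstanding work (head != tail) and fire a completion event on each
 * armed CQ, so pollers wake up and can observe the error state.
 */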
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp *hr_qp;
        struct hns_roce_cq *hr_cq;
        struct list_head cq_list;
        unsigned long flags_qp;
        unsigned long flags;

        INIT_LIST_HEAD(&cq_list);

        spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
        list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
                spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
                if (hr_qp->sq.tail != hr_qp->sq.head)
                        check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
                spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

                spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
                if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
                        check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
                spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
        }

        list_for_each_entry(hr_cq, &cq_list, node)
                hns_roce_cq_completion(hr_dev, hr_cq->cqn);

        spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

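/*
 * Bring-up order: reset the engine, init the command queue, read the hw
 * profile, init the mailbox command path, init EQs (then switch commands
 * to event mode if enabled), init HEM tables, set up the HCA software
 * tables, run hw-specific init, and finally register the IB device.
 * hns_roce_exit() undoes these steps in reverse.
 */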
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
        int ret;
        struct device *dev = hr_dev->dev;

        if (hr_dev->hw->reset) {
                ret = hr_dev->hw->reset(hr_dev, true);
                if (ret) {
                        dev_err(dev, "Reset RoCE engine failed!\n");
                        return ret;
                }
        }
        hr_dev->is_reset = false;

        if (hr_dev->hw->cmq_init) {
                ret = hr_dev->hw->cmq_init(hr_dev);
                if (ret) {
                        dev_err(dev, "Init RoCE Command Queue failed!\n");
                        goto error_failed_cmq_init;
                }
        }

        ret = hr_dev->hw->hw_profile(hr_dev);
        if (ret) {
                dev_err(dev, "Get RoCE engine profile failed!\n");
                goto error_failed_cmd_init;
        }

        ret = hns_roce_cmd_init(hr_dev);
        if (ret) {
                dev_err(dev, "cmd init failed!\n");
                goto error_failed_cmd_init;
        }

        /* EQ depends on poll mode, event mode depends on EQ */
        ret = hr_dev->hw->init_eq(hr_dev);
        if (ret) {
                dev_err(dev, "eq init failed!\n");
                goto error_failed_eq_table;
        }

        if (hr_dev->cmd_mod) {
                ret = hns_roce_cmd_use_events(hr_dev);
                if (ret) {
                        dev_warn(dev,
                                 "Cmd event mode failed, falling back to poll mode!\n");
                        hns_roce_cmd_use_polling(hr_dev);
                }
        }

        ret = hns_roce_init_hem(hr_dev);
        if (ret) {
                dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
                goto error_failed_init_hem;
        }

        ret = hns_roce_setup_hca(hr_dev);
        if (ret) {
                dev_err(dev, "setup hca failed!\n");
                goto error_failed_setup_hca;
        }

        if (hr_dev->hw->hw_init) {
                ret = hr_dev->hw->hw_init(hr_dev);
                if (ret) {
                        dev_err(dev, "hw_init failed!\n");
                        goto error_failed_engine_init;
                }
        }

        INIT_LIST_HEAD(&hr_dev->qp_list);
        spin_lock_init(&hr_dev->qp_list_lock);

        ret = hns_roce_register_device(hr_dev);
        if (ret)
                goto error_failed_register_device;

        return 0;

error_failed_register_device:
        if (hr_dev->hw->hw_exit)
                hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
        hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
        hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
        if (hr_dev->cmd_mod)
                hns_roce_cmd_use_polling(hr_dev);
        hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
        hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
        if (hr_dev->hw->cmq_exit)
                hr_dev->hw->cmq_exit(hr_dev);

error_failed_cmq_init:
        if (hr_dev->hw->reset) {
                if (hr_dev->hw->reset(hr_dev, false))
                        dev_err(dev, "Dereset RoCE engine failed!\n");
        }

        return ret;
}

void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
        hns_roce_unregister_device(hr_dev);

        if (hr_dev->hw->hw_exit)
                hr_dev->hw->hw_exit(hr_dev);
        hns_roce_cleanup_bitmap(hr_dev);
        hns_roce_cleanup_hem(hr_dev);

        if (hr_dev->cmd_mod)
                hns_roce_cmd_use_polling(hr_dev);

        hr_dev->hw->cleanup_eq(hr_dev);
        hns_roce_cmd_cleanup(hr_dev);
        if (hr_dev->hw->cmq_exit)
                hr_dev->hw->cmq_exit(hr_dev);
        if (hr_dev->hw->reset)
                hr_dev->hw->reset(hr_dev, false);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <[email protected]>");
MODULE_AUTHOR("Nenglong Zhao <[email protected]>");
MODULE_AUTHOR("Lijun Ou <[email protected]>");
MODULE_DESCRIPTION("HNS RoCE Driver");