/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
                                        struct ib_mad_port_private *port_priv,
                                        const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
                              struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;

        list_for_each_entry(entry, &ib_mad_port_list, port_list) {
                if (entry->device == device && entry->port_num == port_num)
                        return entry;
        }
        return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        entry = __ib_get_mad_port(device, port_num);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
        /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
        return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
                0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
        switch (qp_type) {
        case IB_QPT_SMI:
                return 0;
        case IB_QPT_GSI:
                return 1;
        default:
                return -1;
        }
}

static int vendor_class_index(u8 mgmt_class)
{
        return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
        if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
            (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return 0;
        return 1;
}

static int is_vendor_oui(char *oui)
{
        if (oui[0] || oui[1] || oui[2])
                return 1;
        return 0;
}

static int is_vendor_method_in_use(
                struct ib_mad_mgmt_vendor_class *vendor_class,
                struct ib_mad_reg_req *mad_reg_req)
{
        struct ib_mad_mgmt_method_table *method;
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++) {
                if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
                        method = vendor_class->method_table[i];
                        if (method) {
                                if (method_in_use(&method, mad_reg_req))
                                        return 1;
                                else
                                        break;
                        }
                }
        }
        return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
        return ((hdr->method & IB_MGMT_METHOD_RESP) ||
                (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
                 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
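
/*
 * Illustrative usage (a sketch, not called from this file): a client's
 * receive handler can use ib_response_mad() to tell solicited responses
 * apart from unsolicited requests and traps:
 *
 *      if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr))
 *              ...match against a previously sent request...
 *      else
 *              ...process as a new incoming request...
 */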

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           struct ib_mad_reg_req *mad_reg_req,
                                           u8 rmpp_version,
                                           ib_mad_send_handler send_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context,
                                           u32 registration_flags)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_reg_req *reg_req = NULL;
        struct ib_mad_mgmt_class_table *class;
        struct ib_mad_mgmt_vendor_class_table *vendor;
        struct ib_mad_mgmt_vendor_class *vendor_class;
        struct ib_mad_mgmt_method_table *method;
        int ret2, qpn;
        unsigned long flags;
        u8 mgmt_class, vclass;

        /* Validate parameters */
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                dev_notice(&device->dev,
                           "ib_register_mad_agent: invalid QP Type %d\n",
                           qp_type);
                goto error1;
        }

        if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
                dev_notice(&device->dev,
                           "ib_register_mad_agent: invalid RMPP Version %u\n",
                           rmpp_version);
                goto error1;
        }

        /* Validate MAD registration request if supplied */
        if (mad_reg_req) {
                if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
                        dev_notice(&device->dev,
                                   "ib_register_mad_agent: invalid Class Version %u\n",
                                   mad_reg_req->mgmt_class_version);
                        goto error1;
                }
                if (!recv_handler) {
                        dev_notice(&device->dev,
                                   "ib_register_mad_agent: no recv_handler\n");
                        goto error1;
                }
                if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
                        /*
                         * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
                         * one in this range currently allowed
                         */
                        if (mad_reg_req->mgmt_class !=
                            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                } else if (mad_reg_req->mgmt_class == 0) {
                        /*
                         * Class 0 is reserved in IBA and is used for
                         * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
                         */
                        dev_notice(&device->dev,
                                   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
                        goto error1;
                } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
                        /*
                         * If class is in "new" vendor range,
                         * ensure supplied OUI is not zero
                         */
                        if (!is_vendor_oui(mad_reg_req->oui)) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: No OUI specified for class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                }
                /* Make sure class supplied is consistent with RMPP */
                if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
                        if (rmpp_version) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                }

                /* Make sure class supplied is consistent with QP type */
                if (qp_type == IB_QPT_SMI) {
                        if ((mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                            (mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                } else {
                        if ((mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                            (mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                }
        } else {
                /* No registration request supplied */
                if (!send_handler)
                        goto error1;
                if (registration_flags & IB_MAD_USER_RMPP)
                        goto error1;
        }

        /* Validate device and port */
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                dev_notice(&device->dev,
                           "ib_register_mad_agent: Invalid port %d\n",
                           port_num);
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }

        /* Verify the QP requested is supported.  For example, Ethernet devices
         * will not have QP0 */
        if (!port_priv->qp_info[qpn].qp) {
                dev_notice(&device->dev,
                           "ib_register_mad_agent: QP %d not supported\n", qpn);
                ret = ERR_PTR(-EPROTONOSUPPORT);
                goto error1;
        }

        /* Allocate structures */
        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
        if (!mad_agent_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        if (mad_reg_req) {
                reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
                if (!reg_req) {
                        ret = ERR_PTR(-ENOMEM);
                        goto error3;
                }
        }

        /* Now, fill in the various structures */
        mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
        mad_agent_priv->reg_req = reg_req;
        mad_agent_priv->agent.rmpp_version = rmpp_version;
        mad_agent_priv->agent.device = device;
        mad_agent_priv->agent.recv_handler = recv_handler;
        mad_agent_priv->agent.send_handler = send_handler;
        mad_agent_priv->agent.context = context;
        mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_agent_priv->agent.port_num = port_num;
        mad_agent_priv->agent.flags = registration_flags;
        spin_lock_init(&mad_agent_priv->lock);
        INIT_LIST_HEAD(&mad_agent_priv->send_list);
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
        INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
        INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

        /*
         * Make sure MAD registration (if supplied)
         * is non overlapping with any existing ones
         */
        if (mad_reg_req) {
                mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
                if (!is_vendor_class(mgmt_class)) {
                        class = port_priv->version[mad_reg_req->
                                                   mgmt_class_version].class;
                        if (class) {
                                method = class->method_table[mgmt_class];
                                if (method) {
                                        if (method_in_use(&method,
                                                           mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
                                                  mgmt_class);
                } else {
                        /* "New" vendor class range */
                        vendor = port_priv->version[mad_reg_req->
                                                    mgmt_class_version].vendor;
                        if (vendor) {
                                vclass = vendor_class_index(mgmt_class);
                                vendor_class = vendor->vendor_class[vclass];
                                if (vendor_class) {
                                        if (is_vendor_method_in_use(
                                                        vendor_class,
                                                        mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
                }
                if (ret2) {
                        ret = ERR_PTR(ret2);
                        goto error4;
                }
        }

        /* Add mad agent into port's agent list */
        list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        return &mad_agent_priv->agent;

error4:
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
        kfree(reg_req);
error3:
        kfree(mad_agent_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
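
/*
 * Illustrative usage (a sketch; my_send_handler, my_recv_handler and
 * my_context are hypothetical caller-supplied symbols): registering a
 * GSI agent for the performance management class might look like:
 *
 *      struct ib_mad_reg_req req = {
 *              .mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
 *              .mgmt_class_version = 1,
 *      };
 *      struct ib_mad_agent *agent;
 *
 *      set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *      agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *                                    0, my_send_handler, my_recv_handler,
 *                                    my_context, 0);
 *      if (IS_ERR(agent))
 *              return PTR_ERR(agent);
 *
 * The matching teardown is ib_unregister_mad_agent(agent).
 */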

static inline int is_snooping_sends(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (/*IB_MAD_SNOOP_POSTED_SENDS |
                 IB_MAD_SNOOP_RMPP_SENDS |*/
                 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
                 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (IB_MAD_SNOOP_RECVS /*|
                 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
                                struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_snoop_private **new_snoop_table;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        /* Check for empty slot in array. */
        for (i = 0; i < qp_info->snoop_table_size; i++)
                if (!qp_info->snoop_table[i])
                        break;

        if (i == qp_info->snoop_table_size) {
                /* Grow table. */
                new_snoop_table = krealloc(qp_info->snoop_table,
                                           sizeof mad_snoop_priv *
                                           (qp_info->snoop_table_size + 1),
                                           GFP_ATOMIC);
                if (!new_snoop_table) {
                        i = -ENOMEM;
                        goto out;
                }

                qp_info->snoop_table = new_snoop_table;
                qp_info->snoop_table_size++;
        }
        qp_info->snoop_table[i] = mad_snoop_priv;
        atomic_inc(&qp_info->snoop_count);
out:
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
        return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           int mad_snoop_flags,
                                           ib_mad_snoop_handler snoop_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret;
        struct ib_mad_snoop_private *mad_snoop_priv;
        int qpn;

        /* Validate parameters */
        if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
            (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }
        /* Allocate structures */
        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
        if (!mad_snoop_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        /* Now, fill in the various structures */
        mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
        mad_snoop_priv->agent.device = device;
        mad_snoop_priv->agent.recv_handler = recv_handler;
        mad_snoop_priv->agent.snoop_handler = snoop_handler;
        mad_snoop_priv->agent.context = context;
        mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_snoop_priv->agent.port_num = port_num;
        mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
        init_completion(&mad_snoop_priv->comp);
        mad_snoop_priv->snoop_index = register_snoop_agent(
                                                &port_priv->qp_info[qpn],
                                                mad_snoop_priv);
        if (mad_snoop_priv->snoop_index < 0) {
                ret = ERR_PTR(mad_snoop_priv->snoop_index);
                goto error2;
        }

        atomic_set(&mad_snoop_priv->refcount, 1);
        return &mad_snoop_priv->agent;

error2:
        kfree(mad_snoop_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

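/*
 * Agent teardown sketch: each agent holds a reference count that starts
 * at 1.  When the final reference is dropped, deref_mad_agent() fires
 * the agent's completion, which unregister_mad_agent() below blocks on
 * before freeing the structure.
 */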
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        /* Note that we could still be handling received MADs */

        /*
         * Canceling all sends results in dropping received response
         * MADs, preventing us from queuing additional work
         */
        cancel_mads(mad_agent_priv);
        port_priv = mad_agent_priv->qp_info->port_priv;
        cancel_delayed_work(&mad_agent_priv->timed_work);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        remove_mad_reg_req(mad_agent_priv);
        list_del(&mad_agent_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        flush_workqueue(port_priv->wq);
        ib_cancel_rmpp_recvs(mad_agent_priv);

        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);

        kfree(mad_agent_priv->reg_req);
        kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_qp_info *qp_info;
        unsigned long flags;

        qp_info = mad_snoop_priv->qp_info;
        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
        atomic_dec(&qp_info->snoop_count);
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

        deref_snoop_agent(mad_snoop_priv);
        wait_for_completion(&mad_snoop_priv->comp);

        kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_snoop_private *mad_snoop_priv;

        /* If the TID is zero, the agent can only snoop. */
        if (mad_agent->hi_tid) {
                mad_agent_priv = container_of(mad_agent,
                                              struct ib_mad_agent_private,
                                              agent);
                unregister_mad_agent(mad_agent_priv);
        } else {
                mad_snoop_priv = container_of(mad_agent,
                                              struct ib_mad_snoop_private,
                                              agent);
                unregister_mad_snoop(mad_snoop_priv);
        }
        return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
        struct ib_mad_queue *mad_queue;
        unsigned long flags;

        BUG_ON(!mad_list->mad_queue);
        mad_queue = mad_list->mad_queue;
        spin_lock_irqsave(&mad_queue->lock, flags);
        list_del(&mad_list->list);
        mad_queue->count--;
        spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_send_buf *send_buf,
                       struct ib_mad_send_wc *mad_send_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                                                    send_buf, mad_send_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_recv_wc *mad_recv_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
                                                   mad_recv_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

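/*
 * Build a synthetic receive work completion for an SMP that is processed
 * locally (looped back to this port) rather than posted to the wire, so
 * that it can flow through the normal receive path.
 */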
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
                u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
        memset(wc, 0, sizeof *wc);
        wc->wr_cqe = cqe;
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->pkey_index = pkey_index;
        wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
        wc->src_qp = IB_QP0;
        wc->qp = qp;
        wc->slid = slid;
        wc->sl = 0;
        wc->dlid_path_bits = 0;
        wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
        size_t size = sizeof(struct ib_mad_private) + mad_size;
        struct ib_mad_private *ret = kzalloc(size, flags);

        if (ret)
                ret->mad_size = mad_size;

        return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
        return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                  struct ib_mad_send_wr_private *mad_send_wr)
{
        int ret = 0;
        struct ib_smp *smp = mad_send_wr->send_buf.mad;
        struct opa_smp *opa_smp = (struct opa_smp *)smp;
        unsigned long flags;
        struct ib_mad_local_private *local;
        struct ib_mad_private *mad_priv;
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent_private *recv_mad_agent = NULL;
        struct ib_device *device = mad_agent_priv->agent.device;
        u8 port_num;
        struct ib_wc mad_wc;
        struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
        size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
        u16 out_mad_pkey_index = 0;
        u16 drslid;
        bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
                                    mad_agent_priv->qp_info->port_priv->port_num);

        if (rdma_cap_ib_switch(device) &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                port_num = send_wr->port_num;
        else
                port_num = mad_agent_priv->agent.port_num;

        /*
         * Directed route handling starts if the initial LID routed part of
         * a request or the ending LID routed part of a response is empty.
         * If we are at the start of the LID routed part, don't update the
         * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
         */
        if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
                u32 opa_drslid;

                if ((opa_get_smp_direction(opa_smp)
                     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
                     OPA_LID_PERMISSIVE &&
                     opa_smi_handle_dr_smp_send(opa_smp,
                                                rdma_cap_ib_switch(device),
                                                port_num) == IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid directed route\n");
                        goto out;
                }
                opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
                if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
                    opa_drslid & 0xffff0000) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
                                opa_drslid);
                        goto out;
                }
                drslid = (u16)(opa_drslid & 0x0000ffff);

                /* Check to post send on QP or process locally */
                if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
                    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
                        goto out;
        } else {
                if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
                     IB_LID_PERMISSIVE &&
                     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
                     IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "Invalid directed route\n");
                        goto out;
                }
                drslid = be16_to_cpu(smp->dr_slid);

                /* Check to post send on QP or process locally */
                if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
                    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
                        goto out;
        }

        local = kmalloc(sizeof *local, GFP_ATOMIC);
        if (!local) {
                ret = -ENOMEM;
                goto out;
        }
        local->mad_priv = NULL;
        local->recv_mad_agent = NULL;
        mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
        if (!mad_priv) {
                ret = -ENOMEM;
                kfree(local);
                goto out;
        }

        build_smp_wc(mad_agent_priv->agent.qp,
                     send_wr->wr.wr_cqe, drslid,
                     send_wr->pkey_index,
                     send_wr->port_num, &mad_wc);

        if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
                mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
                                        + mad_send_wr->send_buf.data_len
                                        + sizeof(struct ib_grh);
        }

        /* No GRH for DR SMP */
        ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
                                  (const struct ib_mad_hdr *)smp, mad_size,
                                  (struct ib_mad_hdr *)mad_priv->mad,
                                  &mad_size, &out_mad_pkey_index);
        switch (ret) {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
                if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
                        /*
                         * Reference MAD agent until receive
                         * side of local completion handled
                         */
                        atomic_inc(&mad_agent_priv->refcount);
                } else
                        kfree(mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kfree(mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS:
                /* Treat like an incoming receive MAD */
                port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                            mad_agent_priv->agent.port_num);
                if (port_priv) {
                        memcpy(mad_priv->mad, smp, mad_priv->mad_size);
                        recv_mad_agent = find_mad_agent(port_priv,
                                                        (const struct ib_mad_hdr *)mad_priv->mad);
                }
                if (!port_priv || !recv_mad_agent) {
                        /*
                         * No receiving agent so drop packet and
                         * generate send completion.
                         */
                        kfree(mad_priv);
                        break;
                }
                local->mad_priv = mad_priv;
                local->recv_mad_agent = recv_mad_agent;
                break;
        default:
                kfree(mad_priv);
                kfree(local);
                ret = -EINVAL;
                goto out;
        }

        local->mad_send_wr = mad_send_wr;
        if (opa) {
                local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
                local->return_wc_byte_len = mad_size;
        }
        /* Reference MAD agent until send side of local completion handled */
        atomic_inc(&mad_agent_priv->refcount);
        /* Queue local completion to local list */
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        queue_work(mad_agent_priv->qp_info->port_priv->wq,
                   &mad_agent_priv->local_work);
        ret = 1;
out:
        return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
        int seg_size, pad;

        seg_size = mad_size - hdr_len;
        if (data_len && seg_size) {
                pad = seg_size - data_len % seg_size;
                return pad == seg_size ? 0 : pad;
        } else
                return seg_size;
}
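
/*
 * Worked example: for an IB MAD (mad_size == 256) with an SA header
 * (hdr_len == 56), seg_size is 200.  data_len == 300 gives
 * pad == 200 - (300 % 200) == 100, so the padded payload fills exactly
 * two RMPP segments; a data_len that is already a multiple of 200
 * yields pad == 0.
 */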

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_segment *s, *t;

        list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
                list_del(&s->list);
                kfree(s);
        }
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
                                size_t mad_size, gfp_t gfp_mask)
{
        struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
        struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
        struct ib_rmpp_segment *seg = NULL;
        int left, seg_size, pad;

        send_buf->seg_size = mad_size - send_buf->hdr_len;
        send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
        seg_size = send_buf->seg_size;
        pad = send_wr->pad;

        /* Allocate data segments. */
        for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
                seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
                if (!seg) {
                        free_send_rmpp_list(send_wr);
                        return -ENOMEM;
                }
                seg->num = ++send_buf->seg_count;
                list_add_tail(&seg->list, &send_wr->rmpp_list);
        }

        /* Zero any padding */
        if (pad)
                memset(seg->data + seg_size - pad, 0, pad);

        rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
                                          agent.rmpp_version;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
                                        struct ib_rmpp_segment, list);
        send_wr->last_ack_seg = send_wr->cur_seg;
        return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
        return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                           u32 remote_qpn, u16 pkey_index,
                                           int rmpp_active,
                                           int hdr_len, int data_len,
                                           gfp_t gfp_mask,
                                           u8 base_version)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        int pad, message_size, ret, size;
        void *buf;
        size_t mad_size;
        bool opa;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);

        opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

        if (opa && base_version == OPA_MGMT_BASE_VERSION)
                mad_size = sizeof(struct opa_mad);
        else
                mad_size = sizeof(struct ib_mad);

        pad = get_pad_size(hdr_len, data_len, mad_size);
        message_size = hdr_len + data_len + pad;

        if (ib_mad_kernel_rmpp_agent(mad_agent)) {
                if (!rmpp_active && message_size > mad_size)
                        return ERR_PTR(-EINVAL);
        } else
                if (rmpp_active || message_size > mad_size)
                        return ERR_PTR(-EINVAL);

        size = rmpp_active ? hdr_len : mad_size;
        buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        mad_send_wr = buf + size;
        INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
        mad_send_wr->send_buf.mad = buf;
        mad_send_wr->send_buf.hdr_len = hdr_len;
        mad_send_wr->send_buf.data_len = data_len;
        mad_send_wr->pad = pad;

        mad_send_wr->mad_agent_priv = mad_agent_priv;
        mad_send_wr->sg_list[0].length = hdr_len;
        mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

        /* OPA MADs don't have to be the full 2048 bytes */
        if (opa && base_version == OPA_MGMT_BASE_VERSION &&
            data_len < mad_size - hdr_len)
                mad_send_wr->sg_list[1].length = data_len;
        else
                mad_send_wr->sg_list[1].length = mad_size - hdr_len;

        mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

        mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

        mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
        mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
        mad_send_wr->send_wr.wr.num_sge = 2;
        mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
        mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
        mad_send_wr->send_wr.remote_qpn = remote_qpn;
        mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
        mad_send_wr->send_wr.pkey_index = pkey_index;

        if (rmpp_active) {
                ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
                if (ret) {
                        kfree(buf);
                        return ERR_PTR(ret);
                }
        }

        mad_send_wr->send_buf.mad_agent = mad_agent;
        atomic_inc(&mad_agent_priv->refcount);
        return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
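
/*
 * Illustrative usage (a sketch, error handling abbreviated): allocating
 * a plain, non-RMPP IB MAD on a previously registered agent:
 *
 *      struct ib_mad_send_buf *msg;
 *
 *      msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *                               IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *                               GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *      if (IS_ERR(msg))
 *              return PTR_ERR(msg);
 *
 * The caller then fills in msg->mad, sets msg->ah, msg->timeout_ms and
 * msg->retries, and hands the buffer to ib_post_send_mad().
 */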

int ib_get_mad_data_offset(u8 mgmt_class)
{
        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
                return IB_MGMT_SA_HDR;
        else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
                 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
                 (mgmt_class == IB_MGMT_CLASS_BIS))
                return IB_MGMT_DEVICE_HDR;
        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return IB_MGMT_VENDOR_HDR;
        else
                return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
        if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_BIS) ||
            ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
             (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
                return 1;
        return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct list_head *list;

        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);
        list = &mad_send_wr->cur_seg->list;

        if (mad_send_wr->cur_seg->num < seg_num) {
                list_for_each_entry(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        } else if (mad_send_wr->cur_seg->num > seg_num) {
                list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        }
        return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
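
/*
 * Illustrative usage (a sketch): RMPP segment numbers start at 1, so a
 * sender can fill the payload of a segmented MAD with:
 *
 *      for (i = 1; i <= send_buf->seg_count; i++)
 *              ...copy up to send_buf->seg_size bytes into
 *                 ib_get_rmpp_segment(send_buf, i)...
 */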

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
        if (mad_send_wr->send_buf.seg_count)
                return ib_get_rmpp_segment(&mad_send_wr->send_buf,
                                           mad_send_wr->seg_num);
        else
                return mad_send_wr->send_buf.mad +
                       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;

        mad_agent_priv = container_of(send_buf->mad_agent,
                                      struct ib_mad_agent_private, agent);
        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);

        free_send_rmpp_list(mad_send_wr);
        kfree(send_buf->mad);
        deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_qp_info *qp_info;
        struct list_head *list;
        struct ib_send_wr *bad_send_wr;
        struct ib_mad_agent *mad_agent;
        struct ib_sge *sge;
        unsigned long flags;
        int ret;

        /* Set WR ID to find mad_send_wr upon completion */
        qp_info = mad_send_wr->mad_agent_priv->qp_info;
        mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
        mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
        mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
        sge[0].addr = ib_dma_map_single(mad_agent->device,
                                        mad_send_wr->send_buf.mad,
                                        sge[0].length,
                                        DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
                return -ENOMEM;

        mad_send_wr->header_mapping = sge[0].addr;

        sge[1].addr = ib_dma_map_single(mad_agent->device,
                                        ib_get_payload(mad_send_wr),
                                        sge[1].length,
                                        DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->header_mapping,
                                    sge[0].length, DMA_TO_DEVICE);
                return -ENOMEM;
        }
        mad_send_wr->payload_mapping = sge[1].addr;

        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
                ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
                                   &bad_send_wr);
                list = &qp_info->send_queue.list;
        } else {
                ret = 0;
                list = &qp_info->overflow_list;
        }

        if (!ret) {
                qp_info->send_queue.count++;
                list_add_tail(&mad_send_wr->mad_list.list, list);
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->header_mapping,
                                    sge[0].length, DMA_TO_DEVICE);
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->payload_mapping,
                                    sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                     struct ib_mad_send_buf **bad_send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_buf *next_send_buf;
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long flags;
        int ret = -EINVAL;

        /* Walk list of send WRs and post each on send list */
        for (; send_buf; send_buf = next_send_buf) {

                mad_send_wr = container_of(send_buf,
                                           struct ib_mad_send_wr_private,
                                           send_buf);
                mad_agent_priv = mad_send_wr->mad_agent_priv;

                if (!send_buf->mad_agent->send_handler ||
                    (send_buf->timeout_ms &&
                     !send_buf->mad_agent->recv_handler)) {
                        ret = -EINVAL;
                        goto error;
                }

                if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
                        if (mad_agent_priv->agent.rmpp_version) {
                                ret = -EINVAL;
                                goto error;
                        }
                }

                /*
                 * Save pointer to next work request to post in case the
                 * current one completes, and the user modifies the work
                 * request associated with the completion
                 */
                next_send_buf = send_buf->next;
                mad_send_wr->send_wr.ah = send_buf->ah;

                if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        ret = handle_outgoing_dr_smp(mad_agent_priv,
                                                     mad_send_wr);
                        if (ret < 0)            /* error */
                                goto error;
                        else if (ret == 1)      /* locally consumed */
                                continue;
                }

                mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
                /* Timeout will be updated after send completes */
                mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
                mad_send_wr->max_retries = send_buf->retries;
                mad_send_wr->retries_left = send_buf->retries;
                send_buf->retries = 0;
                /* Reference for work request to QP + response */
                mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
                mad_send_wr->status = IB_WC_SUCCESS;

                /* Reference MAD agent until send completes */
                atomic_inc(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_agent_priv->send_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
                        ret = ib_send_rmpp_mad(mad_send_wr);
                        if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
                                ret = ib_send_mad(mad_send_wr);
                } else
                        ret = ib_send_mad(mad_send_wr);
                if (ret < 0) {
                        /* Fail send request */
                        spin_lock_irqsave(&mad_agent_priv->lock, flags);
                        list_del(&mad_send_wr->agent_list);
                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                        atomic_dec(&mad_agent_priv->refcount);
                        goto error;
                }
        }
        return 0;
error:
        if (bad_send_buf)
                *bad_send_buf = send_buf;
        return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
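
/*
 * Illustrative usage (a sketch): a buffer from ib_create_send_mad() is
 * normally released from the client's send handler via ib_free_send_mad()
 * once the send completes, so the caller only cleans up directly when
 * the post itself fails:
 *
 *      ret = ib_post_send_mad(msg, NULL);
 *      if (ret)
 *              ib_free_send_mad(msg);
 */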

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *priv;
        struct list_head free_list;

        INIT_LIST_HEAD(&free_list);
        list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

        list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
                                        &free_list, list) {
                mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
                                           recv_buf);
                mad_priv_hdr = container_of(mad_recv_wc,
                                            struct ib_mad_private_header,
                                            recv_wc);
                priv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);
                kfree(priv);
        }
}
EXPORT_SYMBOL(ib_free_recv_mad);
1324
1325 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1326                                         u8 rmpp_version,
1327                                         ib_mad_send_handler send_handler,
1328                                         ib_mad_recv_handler recv_handler,
1329                                         void *context)
1330 {
1331         return ERR_PTR(-EINVAL);        /* XXX: for now */
1332 }
1333 EXPORT_SYMBOL(ib_redirect_mad_qp);
1334
1335 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1336                       struct ib_wc *wc)
1337 {
1338         dev_err(&mad_agent->device->dev,
1339                 "ib_process_mad_wc() not implemented yet\n");
1340         return 0;
1341 }
1342 EXPORT_SYMBOL(ib_process_mad_wc);
1343
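/*
 * Check whether any of the methods in the registration request are
 * already claimed by another agent in the given method table.
 */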
1344 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1345                          struct ib_mad_reg_req *mad_reg_req)
1346 {
1347         int i;
1348
1349         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1350                 if ((*method)->agent[i]) {
1351                         pr_err("Method %d already in use\n", i);
1352                         return -EINVAL;
1353                 }
1354         }
1355         return 0;
1356 }
1357
1358 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1359 {
1360         /* Allocate management method table */
1361         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1362         return (*method) ? 0 : (-ENOMEM);
1363 }
1364
1365 /*
1366  * Check to see if there are any methods still in use
1367  */
1368 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1369 {
1370         int i;
1371
1372         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1373                 if (method->agent[i])
1374                         return 1;
1375         return 0;
1376 }
1377
1378 /*
1379  * Check to see if there are any method tables for this class still in use
1380  */
1381 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1382 {
1383         int i;
1384
1385         for (i = 0; i < MAX_MGMT_CLASS; i++)
1386                 if (class->method_table[i])
1387                         return 1;
1388         return 0;
1389 }
1390
1391 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1392 {
1393         int i;
1394
1395         for (i = 0; i < MAX_MGMT_OUI; i++)
1396                 if (vendor_class->method_table[i])
1397                         return 1;
1398         return 0;
1399 }
1400
1401 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1402                            const char *oui)
1403 {
1404         int i;
1405
1406         for (i = 0; i < MAX_MGMT_OUI; i++)
1407                 /* Is there a matching OUI for this vendor class? */
1408                 if (!memcmp(vendor_class->oui[i], oui, 3))
1409                         return i;
1410
1411         return -1;
1412 }
1413
1414 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1415 {
1416         int i;
1417
1418         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1419                 if (vendor->vendor_class[i])
1420                         return 1;
1421
1422         return 0;
1423 }
1424
1425 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1426                                      struct ib_mad_agent_private *agent)
1427 {
1428         int i;
1429
1430         /* Remove any methods for this mad agent */
1431         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1432                 if (method->agent[i] == agent) {
1433                         method->agent[i] = NULL;
1434                 }
1435         }
1436 }
1437
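/*
 * Register the requested methods for a non-vendor management class,
 * allocating the per-version class table and the per-class method
 * table on demand before claiming each requested method.
 */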
1438 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1439                               struct ib_mad_agent_private *agent_priv,
1440                               u8 mgmt_class)
1441 {
1442         struct ib_mad_port_private *port_priv;
1443         struct ib_mad_mgmt_class_table **class;
1444         struct ib_mad_mgmt_method_table **method;
1445         int i, ret;
1446
1447         port_priv = agent_priv->qp_info->port_priv;
1448         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1449         if (!*class) {
1450                 /* Allocate management class table for "new" class version */
1451                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1452                 if (!*class) {
1453                         ret = -ENOMEM;
1454                         goto error1;
1455                 }
1456
1457                 /* Allocate method table for this management class */
1458                 method = &(*class)->method_table[mgmt_class];
1459                 if ((ret = allocate_method_table(method)))
1460                         goto error2;
1461         } else {
1462                 method = &(*class)->method_table[mgmt_class];
1463                 if (!*method) {
1464                         /* Allocate method table for this management class */
1465                         if ((ret = allocate_method_table(method)))
1466                                 goto error1;
1467                 }
1468         }
1469
1470         /* Now, make sure methods are not already in use */
1471         if (method_in_use(method, mad_reg_req))
1472                 goto error3;
1473
1474         /* Finally, add in methods being registered */
1475         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1476                 (*method)->agent[i] = agent_priv;
1477
1478         return 0;
1479
1480 error3:
1481         /* Remove any methods for this mad agent */
1482         remove_methods_mad_agent(*method, agent_priv);
1483         /* Now, check to see if there are any methods in use */
1484         if (!check_method_table(*method)) {
1485                 /* If not, release management method table */
1486                 kfree(*method);
1487                 *method = NULL;
1488         }
1489         ret = -EINVAL;
1490         goto error1;
1491 error2:
1492         kfree(*class);
1493         *class = NULL;
1494 error1:
1495         return ret;
1496 }
1497
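/*
 * Register the requested methods for a vendor management class with
 * an OUI: find or allocate the vendor class table, locate a method
 * table slot matching the OUI, then claim each requested method.
 */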
1498 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1499                            struct ib_mad_agent_private *agent_priv)
1500 {
1501         struct ib_mad_port_private *port_priv;
1502         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1503         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1504         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1505         struct ib_mad_mgmt_method_table **method;
1506         int i, ret = -ENOMEM;
1507         u8 vclass;
1508
1509         /* "New" vendor (with OUI) class */
1510         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1511         port_priv = agent_priv->qp_info->port_priv;
1512         vendor_table = &port_priv->version[
1513                                 mad_reg_req->mgmt_class_version].vendor;
1514         if (!*vendor_table) {
1515                 /* Allocate mgmt vendor class table for "new" class version */
1516                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1517                 if (!vendor)
1518                         goto error1;
1519
1520                 *vendor_table = vendor;
1521         }
1522         if (!(*vendor_table)->vendor_class[vclass]) {
1523                 /* Allocate table for this management vendor class */
1524                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1525                 if (!vendor_class)
1526                         goto error2;
1527
1528                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1529         }
1530         for (i = 0; i < MAX_MGMT_OUI; i++) {
1531                 /* Is there a matching OUI for this vendor class? */
1532                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1533                             mad_reg_req->oui, 3)) {
1534                         method = &(*vendor_table)->vendor_class[
1535                                                 vclass]->method_table[i];
1536                         BUG_ON(!*method);
1537                         goto check_in_use;
1538                 }
1539         }
1540         for (i = 0; i < MAX_MGMT_OUI; i++) {
1541                 /* OUI slot available? */
1542                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1543                                 vclass]->oui[i])) {
1544                         method = &(*vendor_table)->vendor_class[
1545                                 vclass]->method_table[i];
1546                         BUG_ON(*method);
1547                         /* Allocate method table for this OUI */
1548                         if ((ret = allocate_method_table(method)))
1549                                 goto error3;
1550                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1551                                mad_reg_req->oui, 3);
1552                         goto check_in_use;
1553                 }
1554         }
1555         dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1556         goto error3;
1557
1558 check_in_use:
1559         /* Now, make sure methods are not already in use */
1560         if (method_in_use(method, mad_reg_req))
1561                 goto error4;
1562
1563         /* Finally, add in methods being registered */
1564         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1565                 (*method)->agent[i] = agent_priv;
1566
1567         return 0;
1568
1569 error4:
1570         /* Remove any methods for this mad agent */
1571         remove_methods_mad_agent(*method, agent_priv);
1572         /* Now, check to see if there are any methods in use */
1573         if (!check_method_table(*method)) {
1574                 /* If not, release management method table */
1575                 kfree(*method);
1576                 *method = NULL;
1577         }
1578         ret = -EINVAL;
1579 error3:
1580         if (vendor_class) {
1581                 (*vendor_table)->vendor_class[vclass] = NULL;
1582                 kfree(vendor_class);
1583         }
1584 error2:
1585         if (vendor) {
1586                 *vendor_table = NULL;
1587                 kfree(vendor);
1588         }
1589 error1:
1590         return ret;
1591 }
1592
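/*
 * Undo an agent's MAD registration, removing its methods from the
 * class and vendor class tables and freeing any tables left empty.
 */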
1593 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1594 {
1595         struct ib_mad_port_private *port_priv;
1596         struct ib_mad_mgmt_class_table *class;
1597         struct ib_mad_mgmt_method_table *method;
1598         struct ib_mad_mgmt_vendor_class_table *vendor;
1599         struct ib_mad_mgmt_vendor_class *vendor_class;
1600         int index;
1601         u8 mgmt_class;
1602
1603         /*
1604          * Was a MAD registration request supplied
1605          * with the original registration?
1606          */
1607         if (!agent_priv->reg_req) {
1608                 goto out;
1609         }
1610
1611         port_priv = agent_priv->qp_info->port_priv;
1612         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1613         class = port_priv->version[
1614                         agent_priv->reg_req->mgmt_class_version].class;
1615         if (!class)
1616                 goto vendor_check;
1617
1618         method = class->method_table[mgmt_class];
1619         if (method) {
1620                 /* Remove any methods for this mad agent */
1621                 remove_methods_mad_agent(method, agent_priv);
1622                 /* Now, check to see if there are any methods still in use */
1623                 if (!check_method_table(method)) {
1624                         /* If not, release management method table */
1625                         kfree(method);
1626                         class->method_table[mgmt_class] = NULL;
1627                         /* Any management classes left? */
1628                         if (!check_class_table(class)) {
1629                                 /* If not, release management class table */
1630                                 kfree(class);
1631                                 port_priv->version[
1632                                         agent_priv->reg_req->
1633                                         mgmt_class_version].class = NULL;
1634                         }
1635                 }
1636         }
1637
1638 vendor_check:
1639         if (!is_vendor_class(mgmt_class))
1640                 goto out;
1641
1642         /* normalize mgmt_class to vendor range 2 */
1643         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1644         vendor = port_priv->version[
1645                         agent_priv->reg_req->mgmt_class_version].vendor;
1646
1647         if (!vendor)
1648                 goto out;
1649
1650         vendor_class = vendor->vendor_class[mgmt_class];
1651         if (vendor_class) {
1652                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1653                 if (index < 0)
1654                         goto out;
1655                 method = vendor_class->method_table[index];
1656                 if (method) {
1657                         /* Remove any methods for this mad agent */
1658                         remove_methods_mad_agent(method, agent_priv);
1659                         /*
1660                          * Now, check to see if there are
1661                          * any methods still in use
1662                          */
1663                         if (!check_method_table(method)) {
1664                                 /* If not, release management method table */
1665                                 kfree(method);
1666                                 vendor_class->method_table[index] = NULL;
1667                                 memset(vendor_class->oui[index], 0, 3);
1668                                 /* Any OUIs left? */
1669                                 if (!check_vendor_class(vendor_class)) {
1670                                         /* If not, release vendor class table */
1671                                         kfree(vendor_class);
1672                                         vendor->vendor_class[mgmt_class] = NULL;
1673                                         /* Any other vendor classes left? */
1674                                         if (!check_vendor_table(vendor)) {
1675                                                 kfree(vendor);
1676                                                 port_priv->version[
1677                                                         agent_priv->reg_req->
1678                                                         mgmt_class_version].
1679                                                         vendor = NULL;
1680                                         }
1681                                 }
1682                         }
1683                 }
1684         }
1685
1686 out:
1687         return;
1688 }
1689
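/*
 * Find the agent that should receive an incoming MAD.  Responses are
 * routed by the high 32 bits of the transaction ID; requests are
 * routed by class version, management class, and method (plus the OUI
 * for vendor classes).  A reference is taken on the agent returned.
 */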
1690 static struct ib_mad_agent_private *
1691 find_mad_agent(struct ib_mad_port_private *port_priv,
1692                const struct ib_mad_hdr *mad_hdr)
1693 {
1694         struct ib_mad_agent_private *mad_agent = NULL;
1695         unsigned long flags;
1696
1697         spin_lock_irqsave(&port_priv->reg_lock, flags);
1698         if (ib_response_mad(mad_hdr)) {
1699                 u32 hi_tid;
1700                 struct ib_mad_agent_private *entry;
1701
1702                 /*
1703                  * Routing is based on the high 32 bits
1704                  * of the MAD's transaction ID.
1705                  */
1706                 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1707                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1708                         if (entry->agent.hi_tid == hi_tid) {
1709                                 mad_agent = entry;
1710                                 break;
1711                         }
1712                 }
1713         } else {
1714                 struct ib_mad_mgmt_class_table *class;
1715                 struct ib_mad_mgmt_method_table *method;
1716                 struct ib_mad_mgmt_vendor_class_table *vendor;
1717                 struct ib_mad_mgmt_vendor_class *vendor_class;
1718                 const struct ib_vendor_mad *vendor_mad;
1719                 int index;
1720
1721                 /*
1722                  * Routing is based on version, class, and method.
1723                  * For "newer" vendor MADs, it is also based on the OUI.
1724                  */
1725                 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1726                         goto out;
1727                 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1728                         class = port_priv->version[
1729                                         mad_hdr->class_version].class;
1730                         if (!class)
1731                                 goto out;
1732                         if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1733                             ARRAY_SIZE(class->method_table))
1734                                 goto out;
1735                         method = class->method_table[convert_mgmt_class(
1736                                                         mad_hdr->mgmt_class)];
1737                         if (method)
1738                                 mad_agent = method->agent[mad_hdr->method &
1739                                                           ~IB_MGMT_METHOD_RESP];
1740                 } else {
1741                         vendor = port_priv->version[
1742                                         mad_hdr->class_version].vendor;
1743                         if (!vendor)
1744                                 goto out;
1745                         vendor_class = vendor->vendor_class[vendor_class_index(
1746                                                 mad_hdr->mgmt_class)];
1747                         if (!vendor_class)
1748                                 goto out;
1749                         /* Find matching OUI */
1750                         vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1751                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1752                         if (index == -1)
1753                                 goto out;
1754                         method = vendor_class->method_table[index];
1755                         if (method) {
1756                                 mad_agent = method->agent[mad_hdr->method &
1757                                                           ~IB_MGMT_METHOD_RESP];
1758                         }
1759                 }
1760         }
1761
1762         if (mad_agent) {
1763                 if (mad_agent->agent.recv_handler)
1764                         atomic_inc(&mad_agent->refcount);
1765                 else {
1766                         dev_notice(&port_priv->device->dev,
1767                                    "No receive handler for client %p on port %d\n",
1768                                    &mad_agent->agent, port_priv->port_num);
1769                         mad_agent = NULL;
1770                 }
1771         }
1772 out:
1773         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1774
1775         return mad_agent;
1776 }
1777
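/*
 * Basic sanity checks on an incoming MAD: the base version must be
 * understood, SMP classes may only arrive on QP0, other classes only
 * on QPs other than QP0, and CM attributes other than ClassPortInfo
 * may only use the Send method.
 */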
1778 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1779                         const struct ib_mad_qp_info *qp_info,
1780                         bool opa)
1781 {
1782         int valid = 0;
1783         u32 qp_num = qp_info->qp->qp_num;
1784
1785         /* Make sure MAD base version is understood */
1786         if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1787             (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1788                 pr_err("MAD received with unsupported base version %d %s\n",
1789                        mad_hdr->base_version, opa ? "(opa)" : "");
1790                 goto out;
1791         }
1792
1793         /* Filter SMI packets sent to QPs other than QP0 */
1794         if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1795             (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1796                 if (qp_num == 0)
1797                         valid = 1;
1798         } else {
1799                 /* CM attributes other than ClassPortInfo only use Send method */
1800                 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1801                     (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1802                     (mad_hdr->method != IB_MGMT_METHOD_SEND))
1803                         goto out;
1804                 /* Filter GSI packets sent to QP0 */
1805                 if (qp_num != 0)
1806                         valid = 1;
1807         }
1808
1809 out:
1810         return valid;
1811 }
1812
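/*
 * Treat the MAD as data unless this is a kernel RMPP agent and the
 * MAD is an active RMPP segment of a type other than DATA (such as
 * an ACK, STOP, or ABORT).
 */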
1813 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1814                             const struct ib_mad_hdr *mad_hdr)
1815 {
1816         struct ib_rmpp_mad *rmpp_mad;
1817
1818         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1819         return !mad_agent_priv->agent.rmpp_version ||
1820                 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1821                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1822                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1823                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1824 }
1825
1826 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1827                                      const struct ib_mad_recv_wc *rwc)
1828 {
1829         return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1830                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1831 }
1832
1833 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1834                                    const struct ib_mad_send_wr_private *wr,
1835                                    const struct ib_mad_recv_wc *rwc)
1836 {
1837         struct ib_ah_attr attr;
1838         u8 send_resp, rcv_resp;
1839         union ib_gid sgid;
1840         struct ib_device *device = mad_agent_priv->agent.device;
1841         u8 port_num = mad_agent_priv->agent.port_num;
1842         u8 lmc;
1843
1844         send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1845         rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1846
1847         if (send_resp == rcv_resp)
1848                 /* both are requests or both are responses; assume GIDs differ */
1849                 return 0;
1850
1851         if (ib_query_ah(wr->send_buf.ah, &attr))
1852                 /* Assume not equal, to avoid false positives. */
1853                 return 0;
1854
1855         if (!!(attr.ah_flags & IB_AH_GRH) !=
1856             !!(rwc->wc->wc_flags & IB_WC_GRH))
1857                 /* one has a GRH, the other does not; assume different */
1858                 return 0;
1859
1860         if (!send_resp && rcv_resp) {
1861                 /* this is a request/response pair */
1862                 if (!(attr.ah_flags & IB_AH_GRH)) {
1863                         if (ib_get_cached_lmc(device, port_num, &lmc))
1864                                 return 0;
1865                         return (!lmc || !((attr.src_path_bits ^
1866                                            rwc->wc->dlid_path_bits) &
1867                                           ((1 << lmc) - 1)));
1868                 } else {
1869                         if (ib_get_cached_gid(device, port_num,
1870                                               attr.grh.sgid_index, &sgid, NULL))
1871                                 return 0;
1872                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1873                                        16);
1874                 }
1875         }
1876
1877         if (!(attr.ah_flags & IB_AH_GRH))
1878                 return attr.dlid == rwc->wc->slid;
1879         else
1880                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1881                                16);
1882 }
1883
1884 static inline int is_direct(u8 class)
1885 {
1886         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1887 }
1888
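/*
 * Match a received response to an outstanding request, searching the
 * wait list and then the send list by TID, management class, and (for
 * LID-routed MADs) the sender's GID/path bits.  Returns NULL if the
 * request was canceled.
 */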
1889 struct ib_mad_send_wr_private*
1890 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1891                  const struct ib_mad_recv_wc *wc)
1892 {
1893         struct ib_mad_send_wr_private *wr;
1894         const struct ib_mad_hdr *mad_hdr;
1895
1896         mad_hdr = &wc->recv_buf.mad->mad_hdr;
1897
1898         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1899                 if ((wr->tid == mad_hdr->tid) &&
1900                     rcv_has_same_class(wr, wc) &&
1901                     /*
1902                      * Don't check GID for direct routed MADs.
1903                      * These might have permissive LIDs.
1904                      */
1905                     (is_direct(mad_hdr->mgmt_class) ||
1906                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1907                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1908         }
1909
1910         /*
1911          * It's possible to receive the response before we've
1912          * been notified that the send has completed
1913          */
1914         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1915                 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1916                     wr->tid == mad_hdr->tid &&
1917                     wr->timeout &&
1918                     rcv_has_same_class(wr, wc) &&
1919                     /*
1920                      * Don't check GID for direct routed MADs.
1921                      * These might have permissive LIDs.
1922                      */
1923                     (is_direct(mad_hdr->mgmt_class) ||
1924                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1925                         /* Verify request has not been canceled */
1926                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1927         }
1928         return NULL;
1929 }
1930
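/*
 * Clear the request's timeout; if its send has already completed
 * (one reference left), move it from the wait list to the done list.
 */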
1931 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1932 {
1933         mad_send_wr->timeout = 0;
1934         if (mad_send_wr->refcount == 1)
1935                 list_move_tail(&mad_send_wr->agent_list,
1936                               &mad_send_wr->mad_agent_priv->done_list);
1937 }
1938
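/*
 * Deliver a received MAD to its agent.  Kernel RMPP agents first run
 * RMPP reassembly; a response then completes the matching request and
 * is, per defined behavior, delivered before the request's send
 * completion, while unsolicited MADs go straight to the recv_handler.
 */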
1939 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1940                                  struct ib_mad_recv_wc *mad_recv_wc)
1941 {
1942         struct ib_mad_send_wr_private *mad_send_wr;
1943         struct ib_mad_send_wc mad_send_wc;
1944         unsigned long flags;
1945
1946         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1947         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1948         if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1949                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1950                                                       mad_recv_wc);
1951                 if (!mad_recv_wc) {
1952                         deref_mad_agent(mad_agent_priv);
1953                         return;
1954                 }
1955         }
1956
1957         /* Complete corresponding request */
1958         if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1959                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1960                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1961                 if (!mad_send_wr) {
1962                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1963                         if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1964                            && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1965                            && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1966                                         & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1967                                 /* user rmpp is in effect
1968                                  * and this is an active RMPP MAD
1969                                  */
1970                                 mad_agent_priv->agent.recv_handler(
1971                                                 &mad_agent_priv->agent, NULL,
1972                                                 mad_recv_wc);
1973                                 atomic_dec(&mad_agent_priv->refcount);
1974                         } else {
1975                                 /* not user rmpp, revert to normal behavior and
1976                                  * drop the mad */
1977                                 ib_free_recv_mad(mad_recv_wc);
1978                                 deref_mad_agent(mad_agent_priv);
1979                                 return;
1980                         }
1981                 } else {
1982                         ib_mark_mad_done(mad_send_wr);
1983                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1984
1985                         /* Defined behavior is to complete response before request */
1986                         mad_agent_priv->agent.recv_handler(
1987                                         &mad_agent_priv->agent,
1988                                         &mad_send_wr->send_buf,
1989                                         mad_recv_wc);
1990                         atomic_dec(&mad_agent_priv->refcount);
1991
1992                         mad_send_wc.status = IB_WC_SUCCESS;
1993                         mad_send_wc.vendor_err = 0;
1994                         mad_send_wc.send_buf = &mad_send_wr->send_buf;
1995                         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1996                 }
1997         } else {
1998                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
1999                                                    mad_recv_wc);
2000                 deref_mad_agent(mad_agent_priv);
2001         }
2002 }
2003
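/*
 * Process an incoming directed-route SMP: discard SMPs with invalid
 * hop state, consume or accept locally addressed ones, and on
 * switches forward the SMP out the next port.  Returns IB_SMI_HANDLE
 * if normal receive processing should continue.
 */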
2004 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2005                                      const struct ib_mad_qp_info *qp_info,
2006                                      const struct ib_wc *wc,
2007                                      int port_num,
2008                                      struct ib_mad_private *recv,
2009                                      struct ib_mad_private *response)
2010 {
2011         enum smi_forward_action retsmi;
2012         struct ib_smp *smp = (struct ib_smp *)recv->mad;
2013
2014         if (smi_handle_dr_smp_recv(smp,
2015                                    rdma_cap_ib_switch(port_priv->device),
2016                                    port_num,
2017                                    port_priv->device->phys_port_cnt) ==
2018                                    IB_SMI_DISCARD)
2019                 return IB_SMI_DISCARD;
2020
2021         retsmi = smi_check_forward_dr_smp(smp);
2022         if (retsmi == IB_SMI_LOCAL)
2023                 return IB_SMI_HANDLE;
2024
2025         if (retsmi == IB_SMI_SEND) { /* don't forward */
2026                 if (smi_handle_dr_smp_send(smp,
2027                                            rdma_cap_ib_switch(port_priv->device),
2028                                            port_num) == IB_SMI_DISCARD)
2029                         return IB_SMI_DISCARD;
2030
2031                 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2032                         return IB_SMI_DISCARD;
2033         } else if (rdma_cap_ib_switch(port_priv->device)) {
2034                 /* forward case for switches */
2035                 memcpy(response, recv, mad_priv_size(response));
2036                 response->header.recv_wc.wc = &response->header.wc;
2037                 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2038                 response->header.recv_wc.recv_buf.grh = &response->grh;
2039
2040                 agent_send_response((const struct ib_mad_hdr *)response->mad,
2041                                     &response->grh, wc,
2042                                     port_priv->device,
2043                                     smi_get_fwd_port(smp),
2044                                     qp_info->qp->qp_num,
2045                                     response->mad_size,
2046                                     false);
2047
2048                 return IB_SMI_DISCARD;
2049         }
2050         return IB_SMI_HANDLE;
2051 }
2052
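/*
 * For a Get or Set request that no agent claimed, build a GET_RESP
 * carrying an "unsupported method/attribute" status.  Returns true if
 * a response was generated.
 */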
2053 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2054                                     struct ib_mad_private *response,
2055                                     size_t *resp_len, bool opa)
2056 {
2057         const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2058         struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2059
2060         if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2061             recv_hdr->method == IB_MGMT_METHOD_SET) {
2062                 memcpy(response, recv, mad_priv_size(response));
2063                 response->header.recv_wc.wc = &response->header.wc;
2064                 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2065                 response->header.recv_wc.recv_buf.grh = &response->grh;
2066                 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2067                 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2068                 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2069                         resp_hdr->status |= IB_SMP_DIRECTION;
2070
2071                 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2072                         if (recv_hdr->mgmt_class ==
2073                             IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2074                             recv_hdr->mgmt_class ==
2075                             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2076                                 *resp_len = opa_get_smp_header_size(
2077                                                         (struct opa_smp *)recv->mad);
2078                         else
2079                                 *resp_len = sizeof(struct ib_mad_hdr);
2080                 }
2081
2082                 return true;
2083         } else {
2084                 return false;
2085         }
2086 }
2087
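/* OPA analogue of handle_ib_smi() for OPA directed-route SMPs */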
2088 static enum smi_action
2089 handle_opa_smi(struct ib_mad_port_private *port_priv,
2090                struct ib_mad_qp_info *qp_info,
2091                struct ib_wc *wc,
2092                int port_num,
2093                struct ib_mad_private *recv,
2094                struct ib_mad_private *response)
2095 {
2096         enum smi_forward_action retsmi;
2097         struct opa_smp *smp = (struct opa_smp *)recv->mad;
2098
2099         if (opa_smi_handle_dr_smp_recv(smp,
2100                                    rdma_cap_ib_switch(port_priv->device),
2101                                    port_num,
2102                                    port_priv->device->phys_port_cnt) ==
2103                                    IB_SMI_DISCARD)
2104                 return IB_SMI_DISCARD;
2105
2106         retsmi = opa_smi_check_forward_dr_smp(smp);
2107         if (retsmi == IB_SMI_LOCAL)
2108                 return IB_SMI_HANDLE;
2109
2110         if (retsmi == IB_SMI_SEND) { /* don't forward */
2111                 if (opa_smi_handle_dr_smp_send(smp,
2112                                            rdma_cap_ib_switch(port_priv->device),
2113                                            port_num) == IB_SMI_DISCARD)
2114                         return IB_SMI_DISCARD;
2115
2116                 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2117                     IB_SMI_DISCARD)
2118                         return IB_SMI_DISCARD;
2119
2120         } else if (rdma_cap_ib_switch(port_priv->device)) {
2121                 /* forward case for switches */
2122                 memcpy(response, recv, mad_priv_size(response));
2123                 response->header.recv_wc.wc = &response->header.wc;
2124                 response->header.recv_wc.recv_buf.opa_mad =
2125                                 (struct opa_mad *)response->mad;
2126                 response->header.recv_wc.recv_buf.grh = &response->grh;
2127
2128                 agent_send_response((const struct ib_mad_hdr *)response->mad,
2129                                     &response->grh, wc,
2130                                     port_priv->device,
2131                                     opa_smi_get_fwd_port(smp),
2132                                     qp_info->qp->qp_num,
2133                                     recv->header.wc.byte_len,
2134                                     true);
2135
2136                 return IB_SMI_DISCARD;
2137         }
2138
2139         return IB_SMI_HANDLE;
2140 }
2141
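/*
 * Dispatch an incoming SMP to the OPA or IB directed-route handler
 * based on its base and class versions.
 */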
2142 static enum smi_action
2143 handle_smi(struct ib_mad_port_private *port_priv,
2144            struct ib_mad_qp_info *qp_info,
2145            struct ib_wc *wc,
2146            int port_num,
2147            struct ib_mad_private *recv,
2148            struct ib_mad_private *response,
2149            bool opa)
2150 {
2151         struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2152
2153         if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2154             mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2155                 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2156                                       response);
2157
2158         return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2159 }
2160
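/*
 * Receive completion handler: unmap and validate the received MAD,
 * run directed-route SMP processing, give the device driver first
 * refusal via process_mad(), then hand the MAD to the matching agent.
 * A replacement receive buffer is always posted back to the QP.
 */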
2161 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2162 {
2163         struct ib_mad_port_private *port_priv = cq->cq_context;
2164         struct ib_mad_list_head *mad_list =
2165                 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2166         struct ib_mad_qp_info *qp_info;
2167         struct ib_mad_private_header *mad_priv_hdr;
2168         struct ib_mad_private *recv, *response = NULL;
2169         struct ib_mad_agent_private *mad_agent;
2170         int port_num;
2171         int ret = IB_MAD_RESULT_SUCCESS;
2172         size_t mad_size;
2173         u16 resp_mad_pkey_index = 0;
2174         bool opa;
2175
2176         if (list_empty_careful(&port_priv->port_list))
2177                 return;
2178
2179         if (wc->status != IB_WC_SUCCESS) {
2180                 /*
2181                  * Receive errors indicate that the QP has entered the error
2182                  * state - error handling/shutdown code will cleanup
2183                  */
2184                 return;
2185         }
2186
2187         qp_info = mad_list->mad_queue->qp_info;
2188         dequeue_mad(mad_list);
2189
2190         opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2191                                qp_info->port_priv->port_num);
2192
2193         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2194                                     mad_list);
2195         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2196         ib_dma_unmap_single(port_priv->device,
2197                             recv->header.mapping,
2198                             mad_priv_dma_size(recv),
2199                             DMA_FROM_DEVICE);
2200
2201         /* Set up the MAD receive work completion from the "normal" work completion */
2202         recv->header.wc = *wc;
2203         recv->header.recv_wc.wc = &recv->header.wc;
2204
2205         if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2206                 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2207                 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2208         } else {
2209                 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2210                 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2211         }
2212
2213         recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2214         recv->header.recv_wc.recv_buf.grh = &recv->grh;
2215
2216         if (atomic_read(&qp_info->snoop_count))
2217                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2218
2219         /* Validate MAD */
2220         if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2221                 goto out;
2222
2223         mad_size = recv->mad_size;
2224         response = alloc_mad_private(mad_size, GFP_KERNEL);
2225         if (!response)
2226                 goto out;
2227
2228         if (rdma_cap_ib_switch(port_priv->device))
2229                 port_num = wc->port_num;
2230         else
2231                 port_num = port_priv->port_num;
2232
2233         if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2234             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2235                 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2236                                response, opa)
2237                     == IB_SMI_DISCARD)
2238                         goto out;
2239         }
2240
2241         /* Give driver "right of first refusal" on incoming MAD */
2242         if (port_priv->device->process_mad) {
2243                 ret = port_priv->device->process_mad(port_priv->device, 0,
2244                                                      port_priv->port_num,
2245                                                      wc, &recv->grh,
2246                                                      (const struct ib_mad_hdr *)recv->mad,
2247                                                      recv->mad_size,
2248                                                      (struct ib_mad_hdr *)response->mad,
2249                                                      &mad_size, &resp_mad_pkey_index);
2250
2251                 if (opa)
2252                         wc->pkey_index = resp_mad_pkey_index;
2253
2254                 if (ret & IB_MAD_RESULT_SUCCESS) {
2255                         if (ret & IB_MAD_RESULT_CONSUMED)
2256                                 goto out;
2257                         if (ret & IB_MAD_RESULT_REPLY) {
2258                                 agent_send_response((const struct ib_mad_hdr *)response->mad,
2259                                                     &recv->grh, wc,
2260                                                     port_priv->device,
2261                                                     port_num,
2262                                                     qp_info->qp->qp_num,
2263                                                     mad_size, opa);
2264                                 goto out;
2265                         }
2266                 }
2267         }
2268
2269         mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2270         if (mad_agent) {
2271                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2272                 /*
2273                  * recv is freed either in the error paths of
2274                  * ib_mad_complete_recv() or by the recv_handler it invokes
2275                  */
2276                 recv = NULL;
2277         } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2278                    generate_unmatched_resp(recv, response, &mad_size, opa)) {
2279                 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2280                                     port_priv->device, port_num,
2281                                     qp_info->qp->qp_num, mad_size, opa);
2282         }
2283
2284 out:
2285         /* Post another receive request for this QP */
2286         if (response) {
2287                 ib_mad_post_receive_mads(qp_info, response);
2288                 kfree(recv);
2289         } else
2290                 ib_mad_post_receive_mads(qp_info, recv);
2291 }
2292
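/*
 * Re-arm or cancel the agent's timeout work based on the request at
 * the head of the wait list.
 */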
2293 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2294 {
2295         struct ib_mad_send_wr_private *mad_send_wr;
2296         unsigned long delay;
2297
2298         if (list_empty(&mad_agent_priv->wait_list)) {
2299                 cancel_delayed_work(&mad_agent_priv->timed_work);
2300         } else {
2301                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2302                                          struct ib_mad_send_wr_private,
2303                                          agent_list);
2304
2305                 if (time_after(mad_agent_priv->timeout,
2306                                mad_send_wr->timeout)) {
2307                         mad_agent_priv->timeout = mad_send_wr->timeout;
2308                         delay = mad_send_wr->timeout - jiffies;
2309                         if ((long)delay <= 0)
2310                                 delay = 1;
2311                         mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2312                                          &mad_agent_priv->timed_work, delay);
2313                 }
2314         }
2315 }
2316
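/*
 * Move a sent request onto the wait list, which is kept sorted by
 * absolute timeout, and reschedule the timeout work if this request
 * now expires first.
 */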
2317 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2318 {
2319         struct ib_mad_agent_private *mad_agent_priv;
2320         struct ib_mad_send_wr_private *temp_mad_send_wr;
2321         struct list_head *list_item;
2322         unsigned long delay;
2323
2324         mad_agent_priv = mad_send_wr->mad_agent_priv;
2325         list_del(&mad_send_wr->agent_list);
2326
2327         delay = mad_send_wr->timeout;
2328         mad_send_wr->timeout += jiffies;
2329
2330         if (delay) {
2331                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2332                         temp_mad_send_wr = list_entry(list_item,
2333                                                 struct ib_mad_send_wr_private,
2334                                                 agent_list);
2335                         if (time_after(mad_send_wr->timeout,
2336                                        temp_mad_send_wr->timeout))
2337                                 break;
2338                 }
2339         } else
2340                 list_item = &mad_agent_priv->wait_list;
2342         list_add(&mad_send_wr->agent_list, list_item);
2343
2344         /* Reschedule a work item if we have a shorter timeout */
2345         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2346                 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2347                                  &mad_agent_priv->timed_work, delay);
2348 }
2349
2350 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2351                           int timeout_ms)
2352 {
2353         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2354         wait_for_response(mad_send_wr);
2355 }
2356
2357 /*
2358  * Process a send work completion
2359  */
2360 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2361                              struct ib_mad_send_wc *mad_send_wc)
2362 {
2363         struct ib_mad_agent_private     *mad_agent_priv;
2364         unsigned long                   flags;
2365         int                             ret;
2366
2367         mad_agent_priv = mad_send_wr->mad_agent_priv;
2368         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2369         if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2370                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2371                 if (ret == IB_RMPP_RESULT_CONSUMED)
2372                         goto done;
2373         } else
2374                 ret = IB_RMPP_RESULT_UNHANDLED;
2375
2376         if (mad_send_wc->status != IB_WC_SUCCESS &&
2377             mad_send_wr->status == IB_WC_SUCCESS) {
2378                 mad_send_wr->status = mad_send_wc->status;
2379                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2380         }
2381
2382         if (--mad_send_wr->refcount > 0) {
2383                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2384                     mad_send_wr->status == IB_WC_SUCCESS) {
2385                         wait_for_response(mad_send_wr);
2386                 }
2387                 goto done;
2388         }
2389
2390         /* Remove send from MAD agent and notify client of completion */
2391         list_del(&mad_send_wr->agent_list);
2392         adjust_timeout(mad_agent_priv);
2393         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2394
2395         if (mad_send_wr->status != IB_WC_SUCCESS)
2396                 mad_send_wc->status = mad_send_wr->status;
2397         if (ret == IB_RMPP_RESULT_INTERNAL)
2398                 ib_rmpp_send_handler(mad_send_wc);
2399         else
2400                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2401                                                    mad_send_wc);
2402
2403         /* Release reference on agent taken when sending */
2404         deref_mad_agent(mad_agent_priv);
2405         return;
2406 done:
2407         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2408 }
2409
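/*
 * Send completion handler: unmap the send buffers, complete the work
 * request, and if sends had backed up onto the overflow list, post
 * the next one to the QP.
 */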
2410 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2411 {
2412         struct ib_mad_port_private *port_priv = cq->cq_context;
2413         struct ib_mad_list_head *mad_list =
2414                 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2415         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2416         struct ib_mad_qp_info           *qp_info;
2417         struct ib_mad_queue             *send_queue;
2418         struct ib_send_wr               *bad_send_wr;
2419         struct ib_mad_send_wc           mad_send_wc;
2420         unsigned long flags;
2421         int ret;
2422
2423         if (list_empty_careful(&port_priv->port_list))
2424                 return;
2425
2426         if (wc->status != IB_WC_SUCCESS) {
2427                 if (!ib_mad_send_error(port_priv, wc))
2428                         return;
2429         }
2430
2431         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2432                                    mad_list);
2433         send_queue = mad_list->mad_queue;
2434         qp_info = send_queue->qp_info;
2435
2436 retry:
2437         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2438                             mad_send_wr->header_mapping,
2439                             mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2440         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2441                             mad_send_wr->payload_mapping,
2442                             mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2443         queued_send_wr = NULL;
2444         spin_lock_irqsave(&send_queue->lock, flags);
2445         list_del(&mad_list->list);
2446
2447         /* Move queued send to the send queue */
2448         if (send_queue->count-- > send_queue->max_active) {
2449                 mad_list = container_of(qp_info->overflow_list.next,
2450                                         struct ib_mad_list_head, list);
2451                 queued_send_wr = container_of(mad_list,
2452                                         struct ib_mad_send_wr_private,
2453                                         mad_list);
2454                 list_move_tail(&mad_list->list, &send_queue->list);
2455         }
2456         spin_unlock_irqrestore(&send_queue->lock, flags);
2457
2458         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2459         mad_send_wc.status = wc->status;
2460         mad_send_wc.vendor_err = wc->vendor_err;
2461         if (atomic_read(&qp_info->snoop_count))
2462                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2463                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2464         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2465
2466         if (queued_send_wr) {
2467                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2468                                    &bad_send_wr);
2469                 if (ret) {
2470                         dev_err(&port_priv->device->dev,
2471                                 "ib_post_send failed: %d\n", ret);
2472                         mad_send_wr = queued_send_wr;
2473                         wc->status = IB_WC_LOC_QP_OP_ERR;
2474                         goto retry;
2475                 }
2476         }
2477 }
2478
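/*
 * Mark every send still on the send queue so that its flush
 * completion reposts it instead of failing it.
 */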
2479 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2480 {
2481         struct ib_mad_send_wr_private *mad_send_wr;
2482         struct ib_mad_list_head *mad_list;
2483         unsigned long flags;
2484
2485         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2486         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2487                 mad_send_wr = container_of(mad_list,
2488                                            struct ib_mad_send_wr_private,
2489                                            mad_list);
2490                 mad_send_wr->retry = 1;
2491         }
2492         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2493 }
2494
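/*
 * Handle a failed send completion.  A flushed send marked for retry
 * is simply reposted; for other errors the QP is moved from SQE back
 * to RTS and the remaining queued sends are marked for retry.
 * Returns true if the completion should still be reported as a
 * failure.
 */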
2495 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2496                 struct ib_wc *wc)
2497 {
2498         struct ib_mad_list_head *mad_list =
2499                 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2500         struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2501         struct ib_mad_send_wr_private *mad_send_wr;
2502         int ret;
2503
2504         /*
2505          * Send errors will transition the QP to SQE - move
2506          * QP to RTS and repost flushed work requests
2507          */
2508         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2509                                    mad_list);
2510         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2511                 if (mad_send_wr->retry) {
2512                         /* Repost send */
2513                         struct ib_send_wr *bad_send_wr;
2514
2515                         mad_send_wr->retry = 0;
2516                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2517                                         &bad_send_wr);
2518                         if (!ret)
2519                                 return false;
2520                 }
2521         } else {
2522                 struct ib_qp_attr *attr;
2523
2524                 /* Transition QP to RTS and fail offending send */
2525                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2526                 if (attr) {
2527                         attr->qp_state = IB_QPS_RTS;
2528                         attr->cur_qp_state = IB_QPS_SQE;
2529                         ret = ib_modify_qp(qp_info->qp, attr,
2530                                            IB_QP_STATE | IB_QP_CUR_STATE);
2531                         kfree(attr);
2532                         if (ret)
2533                                 dev_err(&port_priv->device->dev,
2534                                         "%s - ib_modify_qp to RTS: %d\n",
2535                                         __func__, ret);
2536                         else
2537                                 mark_sends_for_retry(qp_info);
2538                 }
2539         }
2540
2541         return true;
2542 }
2543
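/*
 * Fail every outstanding send for an agent being deregistered: active
 * sends are marked with IB_WC_WR_FLUSH_ERR, and waiting requests are
 * completed immediately with that status.
 */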
2544 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2545 {
2546         unsigned long flags;
2547         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2548         struct ib_mad_send_wc mad_send_wc;
2549         struct list_head cancel_list;
2550
2551         INIT_LIST_HEAD(&cancel_list);
2552
2553         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2554         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2555                                  &mad_agent_priv->send_list, agent_list) {
2556                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2557                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2558                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2559                 }
2560         }
2561
2562         /* Empty wait list to prevent receives from finding a request */
2563         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2564         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2565
2566         /* Report all cancelled requests */
2567         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2568         mad_send_wc.vendor_err = 0;
2569
2570         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2571                                  &cancel_list, agent_list) {
2572                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2573                 list_del(&mad_send_wr->agent_list);
2574                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2575                                                    &mad_send_wc);
2576                 atomic_dec(&mad_agent_priv->refcount);
2577         }
2578 }
2579
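/*
 * Find the send WR backing a send buffer: first on the wait list, then
 * (for RMPP data MADs only) on the send list.  Caller must hold
 * mad_agent_priv->lock.
 */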
2580 static struct ib_mad_send_wr_private*
2581 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2582              struct ib_mad_send_buf *send_buf)
2583 {
2584         struct ib_mad_send_wr_private *mad_send_wr;
2585
2586         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2587                             agent_list) {
2588                 if (&mad_send_wr->send_buf == send_buf)
2589                         return mad_send_wr;
2590         }
2591
2592         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2593                             agent_list) {
2594                 if (is_rmpp_data_mad(mad_agent_priv,
2595                                      mad_send_wr->send_buf.mad) &&
2596                     &mad_send_wr->send_buf == send_buf)
2597                         return mad_send_wr;
2598         }
2599         return NULL;
2600 }
2601
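/*
 * Reset the timeout of an outstanding request.  A timeout_ms of zero
 * cancels the request: it is marked IB_WC_WR_FLUSH_ERR and completed
 * through the timeout machinery.  A (hypothetical) caller extending a
 * pending query to two seconds would do:
 *
 *	err = ib_modify_mad(agent, send_buf, 2000);
 *
 * where -EINVAL means the request has already completed.
 */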
2602 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2603                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2604 {
2605         struct ib_mad_agent_private *mad_agent_priv;
2606         struct ib_mad_send_wr_private *mad_send_wr;
2607         unsigned long flags;
2608         int active;
2609
2610         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2611                                       agent);
2612         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2613         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2614         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2615                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2616                 return -EINVAL;
2617         }
2618
2619         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2620         if (!timeout_ms)
2621                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2624
2625         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2626         if (active)
2627                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2628         else
2629                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2630
2631         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2632         return 0;
2633 }
2634 EXPORT_SYMBOL(ib_modify_mad);
2635
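/* Cancelling is just modifying the timeout to zero */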
2636 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2637                    struct ib_mad_send_buf *send_buf)
2638 {
2639         ib_modify_mad(mad_agent, send_buf, 0);
2640 }
2641 EXPORT_SYMBOL(ib_cancel_mad);
2642
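/*
 * Work handler for MADs addressed to the local port: synthesize a receive
 * completion for the receiving agent (when one exists) and then report
 * the send completion to the sender, response before request.
 */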
2643 static void local_completions(struct work_struct *work)
2644 {
2645         struct ib_mad_agent_private *mad_agent_priv;
2646         struct ib_mad_local_private *local;
2647         struct ib_mad_agent_private *recv_mad_agent;
2648         unsigned long flags;
2649         int free_mad;
2650         struct ib_wc wc;
2651         struct ib_mad_send_wc mad_send_wc;
2652         bool opa;
2653
2654         mad_agent_priv =
2655                 container_of(work, struct ib_mad_agent_private, local_work);
2656
2657         opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2658                                mad_agent_priv->qp_info->port_priv->port_num);
2659
2660         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2661         while (!list_empty(&mad_agent_priv->local_list)) {
2662                 local = list_entry(mad_agent_priv->local_list.next,
2663                                    struct ib_mad_local_private,
2664                                    completion_list);
2665                 list_del(&local->completion_list);
2666                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2667                 free_mad = 0;
2668                 if (local->mad_priv) {
2669                         u8 base_version;
2670                         recv_mad_agent = local->recv_mad_agent;
2671                         if (!recv_mad_agent) {
2672                                 dev_err(&mad_agent_priv->agent.device->dev,
2673                                         "No receive MAD agent for local completion\n");
2674                                 free_mad = 1;
2675                                 goto local_send_completion;
2676                         }
2677
2678                         /*
2679                          * Defined behavior is to complete the response
2680                          * before the request
2681                          */
2682                         build_smp_wc(recv_mad_agent->agent.qp,
2683                                      local->mad_send_wr->send_wr.wr.wr_cqe,
2684                                      be16_to_cpu(IB_LID_PERMISSIVE),
2685                                      local->mad_send_wr->send_wr.pkey_index,
2686                                      recv_mad_agent->agent.port_num, &wc);
2687
2688                         local->mad_priv->header.recv_wc.wc = &wc;
2689
2690                         base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2691                         if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2692                                 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2693                                 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2694                         } else {
2695                                 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2696                                 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2697                         }
2698
2699                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2700                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2701                                  &local->mad_priv->header.recv_wc.rmpp_list);
2702                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2703                         local->mad_priv->header.recv_wc.recv_buf.mad =
2704                                                 (struct ib_mad *)local->mad_priv->mad;
2705                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2706                                 snoop_recv(recv_mad_agent->qp_info,
2707                                    &local->mad_priv->header.recv_wc,
2708                                            IB_MAD_SNOOP_RECVS);
2709                         recv_mad_agent->agent.recv_handler(
2710                                                 &recv_mad_agent->agent,
2711                                                 &local->mad_send_wr->send_buf,
2712                                                 &local->mad_priv->header.recv_wc);
2713                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2714                         atomic_dec(&recv_mad_agent->refcount);
2715                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2716                 }
2717
2718 local_send_completion:
2719                 /* Complete send */
2720                 mad_send_wc.status = IB_WC_SUCCESS;
2721                 mad_send_wc.vendor_err = 0;
2722                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2723                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2724                         snoop_send(mad_agent_priv->qp_info,
2725                                    &local->mad_send_wr->send_buf,
2726                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2727                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2728                                                    &mad_send_wc);
2729
2730                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2731                 atomic_dec(&mad_agent_priv->refcount);
2732                 if (free_mad)
2733                         kfree(local->mad_priv);
2734                 kfree(local);
2735         }
2736         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2737 }
2738
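/*
 * Consume one retry and repost the send.  Returns -ETIMEDOUT once the
 * retries are exhausted; RMPP sends get a chance to handle the retry
 * themselves before the MAD is simply resent.
 */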
2739 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2740 {
2741         int ret;
2742
2743         if (!mad_send_wr->retries_left)
2744                 return -ETIMEDOUT;
2745
2746         mad_send_wr->retries_left--;
2747         mad_send_wr->send_buf.retries++;
2748
2749         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2750
2751         if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2752                 ret = ib_retry_rmpp(mad_send_wr);
2753                 switch (ret) {
2754                 case IB_RMPP_RESULT_UNHANDLED:
2755                         ret = ib_send_mad(mad_send_wr);
2756                         break;
2757                 case IB_RMPP_RESULT_CONSUMED:
2758                         ret = 0;
2759                         break;
2760                 default:
2761                         ret = -ECOMM;
2762                         break;
2763                 }
2764         } else
2765                 ret = ib_send_mad(mad_send_wr);
2766
2767         if (!ret) {
2768                 mad_send_wr->refcount++;
2769                 list_add_tail(&mad_send_wr->agent_list,
2770                               &mad_send_wr->mad_agent_priv->send_list);
2771         }
2772         return ret;
2773 }
2774
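/*
 * Delayed-work handler that expires requests on the wait list.  The wait
 * list is maintained in timeout order, so scanning stops (and the work is
 * requeued) at the first entry whose deadline is still in the future.
 */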
2775 static void timeout_sends(struct work_struct *work)
2776 {
2777         struct ib_mad_agent_private *mad_agent_priv;
2778         struct ib_mad_send_wr_private *mad_send_wr;
2779         struct ib_mad_send_wc mad_send_wc;
2780         unsigned long flags, delay;
2781
2782         mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2783                                       timed_work.work);
2784         mad_send_wc.vendor_err = 0;
2785
2786         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2787         while (!list_empty(&mad_agent_priv->wait_list)) {
2788                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2789                                          struct ib_mad_send_wr_private,
2790                                          agent_list);
2791
2792                 if (time_after(mad_send_wr->timeout, jiffies)) {
2793                         delay = mad_send_wr->timeout - jiffies;
2794                         if ((long)delay <= 0)
2795                                 delay = 1;
2796                         queue_delayed_work(mad_agent_priv->qp_info->
2797                                            port_priv->wq,
2798                                            &mad_agent_priv->timed_work, delay);
2799                         break;
2800                 }
2801
2802                 list_del(&mad_send_wr->agent_list);
2803                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2804                     !retry_send(mad_send_wr))
2805                         continue;
2806
2807                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2808
2809                 if (mad_send_wr->status == IB_WC_SUCCESS)
2810                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2811                 else
2812                         mad_send_wc.status = mad_send_wr->status;
2813                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2814                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2815                                                    &mad_send_wc);
2816
2817                 atomic_dec(&mad_agent_priv->refcount);
2818                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2819         }
2820         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2821 }
2822
2823 /*
2824  * Allocate receive MADs and post receive WRs for them
2825  */
2826 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2827                                     struct ib_mad_private *mad)
2828 {
2829         unsigned long flags;
2830         int post, ret;
2831         struct ib_mad_private *mad_priv;
2832         struct ib_sge sg_list;
2833         struct ib_recv_wr recv_wr, *bad_recv_wr;
2834         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2835
2836         /* Initialize common scatter list fields */
2837         sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2838
2839         /* Initialize common receive WR fields */
2840         recv_wr.next = NULL;
2841         recv_wr.sg_list = &sg_list;
2842         recv_wr.num_sge = 1;
2843
2844         do {
2845                 /* Allocate and map receive buffer */
2846                 if (mad) {
2847                         mad_priv = mad;
2848                         mad = NULL;
2849                 } else {
2850                         mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2851                                                      GFP_ATOMIC);
2852                         if (!mad_priv) {
2853                                 ret = -ENOMEM;
2854                                 break;
2855                         }
2856                 }
2857                 sg_list.length = mad_priv_dma_size(mad_priv);
2858                 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2859                                                  &mad_priv->grh,
2860                                                  mad_priv_dma_size(mad_priv),
2861                                                  DMA_FROM_DEVICE);
2862                 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2863                                                   sg_list.addr))) {
2864                         ret = -ENOMEM;
2865                         break;
2866                 }
2867                 mad_priv->header.mapping = sg_list.addr;
2868                 mad_priv->header.mad_list.mad_queue = recv_queue;
2869                 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2870                 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2871
2872                 /* Post receive WR */
2873                 spin_lock_irqsave(&recv_queue->lock, flags);
2874                 post = (++recv_queue->count < recv_queue->max_active);
2875                 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2876                 spin_unlock_irqrestore(&recv_queue->lock, flags);
2877                 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2878                 if (ret) {
2879                         spin_lock_irqsave(&recv_queue->lock, flags);
2880                         list_del(&mad_priv->header.mad_list.list);
2881                         recv_queue->count--;
2882                         spin_unlock_irqrestore(&recv_queue->lock, flags);
2883                         ib_dma_unmap_single(qp_info->port_priv->device,
2884                                             mad_priv->header.mapping,
2885                                             mad_priv_dma_size(mad_priv),
2886                                             DMA_FROM_DEVICE);
2887                         kfree(mad_priv);
2888                         dev_err(&qp_info->port_priv->device->dev,
2889                                 "ib_post_recv failed: %d\n", ret);
2890                         break;
2891                 }
2892         } while (post);
2893
2894         return ret;
2895 }
2896
2897 /*
2898  * Return (i.e. unmap and free) all the posted receive MADs
2899  */
2900 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2901 {
2902         struct ib_mad_private_header *mad_priv_hdr;
2903         struct ib_mad_private *recv;
2904         struct ib_mad_list_head *mad_list;
2905
2906         if (!qp_info->qp)
2907                 return;
2908
2909         while (!list_empty(&qp_info->recv_queue.list)) {
2910
2911                 mad_list = list_entry(qp_info->recv_queue.list.next,
2912                                       struct ib_mad_list_head, list);
2913                 mad_priv_hdr = container_of(mad_list,
2914                                             struct ib_mad_private_header,
2915                                             mad_list);
2916                 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2917                                     header);
2918
2919                 /* Remove from posted receive MAD list */
2920                 list_del(&mad_list->list);
2921
2922                 ib_dma_unmap_single(qp_info->port_priv->device,
2923                                     recv->header.mapping,
2924                                     mad_priv_dma_size(recv),
2925                                     DMA_FROM_DEVICE);
2926                 kfree(recv);
2927         }
2928
2929         qp_info->recv_queue.count = 0;
2930 }
2931
2932 /*
2933  * Start the port - transition the MAD QPs to RTS and post receive WRs
2934  */
2935 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2936 {
2937         int ret, i;
2938         struct ib_qp_attr *attr;
2939         struct ib_qp *qp;
2940         u16 pkey_index;
2941
2942         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2943         if (!attr)
2944                 return -ENOMEM;
2945
2946         ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2947                            IB_DEFAULT_PKEY_FULL, &pkey_index);
2948         if (ret)
2949                 pkey_index = 0;
2950
2951         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2952                 qp = port_priv->qp_info[i].qp;
2953                 if (!qp)
2954                         continue;
2955
2956                 /*
2957                  * PKey index for QP1 is irrelevant but
2958                  * one is needed for the Reset to Init transition
2959                  */
2960                 attr->qp_state = IB_QPS_INIT;
2961                 attr->pkey_index = pkey_index;
2962                 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2963                 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2964                                              IB_QP_PKEY_INDEX | IB_QP_QKEY);
2965                 if (ret) {
2966                         dev_err(&port_priv->device->dev,
2967                                 "Couldn't change QP%d state to INIT: %d\n",
2968                                 i, ret);
2969                         goto out;
2970                 }
2971
2972                 attr->qp_state = IB_QPS_RTR;
2973                 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2974                 if (ret) {
2975                         dev_err(&port_priv->device->dev,
2976                                 "Couldn't change QP%d state to RTR: %d\n",
2977                                 i, ret);
2978                         goto out;
2979                 }
2980
2981                 attr->qp_state = IB_QPS_RTS;
2982                 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2983                 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2984                 if (ret) {
2985                         dev_err(&port_priv->device->dev,
2986                                 "Couldn't change QP%d state to RTS: %d\n",
2987                                 i, ret);
2988                         goto out;
2989                 }
2990         }
2991
2992         ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2993         if (ret) {
2994                 dev_err(&port_priv->device->dev,
2995                         "Failed to request completion notification: %d\n",
2996                         ret);
2997                 goto out;
2998         }
2999
3000         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3001                 if (!port_priv->qp_info[i].qp)
3002                         continue;
3003
3004                 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3005                 if (ret) {
3006                         dev_err(&port_priv->device->dev,
3007                                 "Couldn't post receive WRs\n");
3008                         goto out;
3009                 }
3010         }
3011 out:
3012         kfree(attr);
3013         return ret;
3014 }
3015
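/*
 * Fatal QP errors are only logged; there is no recovery at this layer.
 */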
3016 static void qp_event_handler(struct ib_event *event, void *qp_context)
3017 {
3018         struct ib_mad_qp_info   *qp_info = qp_context;
3019
3020         /* It's worse than that! He's dead, Jim! */
3021         dev_err(&qp_info->port_priv->device->dev,
3022                 "Fatal error (%d) on MAD QP (%d)\n",
3023                 event->event, qp_info->qp->qp_num);
3024 }
3025
3026 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3027                            struct ib_mad_queue *mad_queue)
3028 {
3029         mad_queue->qp_info = qp_info;
3030         mad_queue->count = 0;
3031         spin_lock_init(&mad_queue->lock);
3032         INIT_LIST_HEAD(&mad_queue->list);
3033 }
3034
3035 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3036                         struct ib_mad_qp_info *qp_info)
3037 {
3038         qp_info->port_priv = port_priv;
3039         init_mad_queue(qp_info, &qp_info->send_queue);
3040         init_mad_queue(qp_info, &qp_info->recv_queue);
3041         INIT_LIST_HEAD(&qp_info->overflow_list);
3042         spin_lock_init(&qp_info->snoop_lock);
3043         qp_info->snoop_table = NULL;
3044         qp_info->snoop_table_size = 0;
3045         atomic_set(&qp_info->snoop_count, 0);
3046 }
3047
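/*
 * Create one of the special QPs (SMI or GSI), sharing the port's CQ
 * between its send and receive queues.
 */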
3048 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3049                          enum ib_qp_type qp_type)
3050 {
3051         struct ib_qp_init_attr  qp_init_attr;
3052         int ret;
3053
3054         memset(&qp_init_attr, 0, sizeof qp_init_attr);
3055         qp_init_attr.send_cq = qp_info->port_priv->cq;
3056         qp_init_attr.recv_cq = qp_info->port_priv->cq;
3057         qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3058         qp_init_attr.cap.max_send_wr = mad_sendq_size;
3059         qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3060         qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3061         qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3062         qp_init_attr.qp_type = qp_type;
3063         qp_init_attr.port_num = qp_info->port_priv->port_num;
3064         qp_init_attr.qp_context = qp_info;
3065         qp_init_attr.event_handler = qp_event_handler;
3066         qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3067         if (IS_ERR(qp_info->qp)) {
3068                 dev_err(&qp_info->port_priv->device->dev,
3069                         "Couldn't create ib_mad QP%d\n",
3070                         get_spl_qp_index(qp_type));
3071                 ret = PTR_ERR(qp_info->qp);
3072                 goto error;
3073         }
3074         /* Use minimum queue sizes unless the CQ is resized */
3075         qp_info->send_queue.max_active = mad_sendq_size;
3076         qp_info->recv_queue.max_active = mad_recvq_size;
3077         return 0;
3078
3079 error:
3080         return ret;
3081 }
3082
3083 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3084 {
3085         if (!qp_info->qp)
3086                 return;
3087
3088         ib_destroy_qp(qp_info->qp);
3089         kfree(qp_info->snoop_table);
3090 }
3091
3092 /*
3093  * Open the port
3094  * Create the CQ, PD, QPs, and workqueue if needed
3095  */
3096 static int ib_mad_port_open(struct ib_device *device,
3097                             int port_num)
3098 {
3099         int ret, cq_size;
3100         struct ib_mad_port_private *port_priv;
3101         unsigned long flags;
3102         char name[sizeof "ib_mad123"];
3103         int has_smi;
3104
3105         if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3106                 return -EFAULT;
3107
3108         if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3109                     rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3110                 return -EFAULT;
3111
3112         /* Create new device info */
3113         port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3114         if (!port_priv)
3115                 return -ENOMEM;
3116
3117         port_priv->device = device;
3118         port_priv->port_num = port_num;
3119         spin_lock_init(&port_priv->reg_lock);
3120         INIT_LIST_HEAD(&port_priv->agent_list);
3121         init_mad_qp(port_priv, &port_priv->qp_info[0]);
3122         init_mad_qp(port_priv, &port_priv->qp_info[1]);
3123
3124         cq_size = mad_sendq_size + mad_recvq_size;
3125         has_smi = rdma_cap_ib_smi(device, port_num);
3126         if (has_smi)
3127                 cq_size *= 2;
3128
3129         port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3130                         IB_POLL_WORKQUEUE);
3131         if (IS_ERR(port_priv->cq)) {
3132                 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3133                 ret = PTR_ERR(port_priv->cq);
3134                 goto error3;
3135         }
3136
3137         port_priv->pd = ib_alloc_pd(device, 0);
3138         if (IS_ERR(port_priv->pd)) {
3139                 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3140                 ret = PTR_ERR(port_priv->pd);
3141                 goto error4;
3142         }
3143
3144         if (has_smi) {
3145                 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3146                 if (ret)
3147                         goto error6;
3148         }
3149         ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3150         if (ret)
3151                 goto error7;
3152
3153         snprintf(name, sizeof name, "ib_mad%d", port_num);
3154         port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3155         if (!port_priv->wq) {
3156                 ret = -ENOMEM;
3157                 goto error8;
3158         }
3159
3160         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3161         list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3162         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3163
3164         ret = ib_mad_port_start(port_priv);
3165         if (ret) {
3166                 dev_err(&device->dev, "Couldn't start port\n");
3167                 goto error9;
3168         }
3169
3170         return 0;
3171
3172 error9:
3173         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3174         list_del_init(&port_priv->port_list);
3175         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3176
3177         destroy_workqueue(port_priv->wq);
3178 error8:
3179         destroy_mad_qp(&port_priv->qp_info[1]);
3180 error7:
3181         destroy_mad_qp(&port_priv->qp_info[0]);
3182 error6:
3183         ib_dealloc_pd(port_priv->pd);
3184 error4:
3185         ib_free_cq(port_priv->cq);
3186         cleanup_recv_queue(&port_priv->qp_info[1]);
3187         cleanup_recv_queue(&port_priv->qp_info[0]);
3188 error3:
3189         kfree(port_priv);
3190
3191         return ret;
3192 }
3193
3194 /*
3195  * Close the port
3196  * If there are no classes using the port, free the port
3197  * resources (CQ, PD, QPs, workqueue) and remove the port's info structure
3198  */
3199 static int ib_mad_port_close(struct ib_device *device, int port_num)
3200 {
3201         struct ib_mad_port_private *port_priv;
3202         unsigned long flags;
3203
3204         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3205         port_priv = __ib_get_mad_port(device, port_num);
3206         if (port_priv == NULL) {
3207                 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3208                 dev_err(&device->dev, "Port %d not found\n", port_num);
3209                 return -ENODEV;
3210         }
3211         list_del_init(&port_priv->port_list);
3212         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3213
3214         destroy_workqueue(port_priv->wq);
3215         destroy_mad_qp(&port_priv->qp_info[1]);
3216         destroy_mad_qp(&port_priv->qp_info[0]);
3217         ib_dealloc_pd(port_priv->pd);
3218         ib_free_cq(port_priv->cq);
3219         cleanup_recv_queue(&port_priv->qp_info[1]);
3220         cleanup_recv_queue(&port_priv->qp_info[0]);
3221         /* XXX: Handle deallocation of MAD registration tables */
3222
3223         kfree(port_priv);
3224
3225         return 0;
3226 }
3227
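/*
 * Bring up MAD and agent services on every port of the device that
 * supports them, unwinding any ports already opened on failure.
 */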
3228 static void ib_mad_init_device(struct ib_device *device)
3229 {
3230         int start, i;
3231
3232         start = rdma_start_port(device);
3233
3234         for (i = start; i <= rdma_end_port(device); i++) {
3235                 if (!rdma_cap_ib_mad(device, i))
3236                         continue;
3237
3238                 if (ib_mad_port_open(device, i)) {
3239                         dev_err(&device->dev, "Couldn't open port %d\n", i);
3240                         goto error;
3241                 }
3242                 if (ib_agent_port_open(device, i)) {
3243                         dev_err(&device->dev,
3244                                 "Couldn't open port %d for agents\n", i);
3245                         goto error_agent;
3246                 }
3247         }
3248         return;
3249
3250 error_agent:
3251         if (ib_mad_port_close(device, i))
3252                 dev_err(&device->dev, "Couldn't close port %d\n", i);
3253
3254 error:
3255         while (--i >= start) {
3256                 if (!rdma_cap_ib_mad(device, i))
3257                         continue;
3258
3259                 if (ib_agent_port_close(device, i))
3260                         dev_err(&device->dev,
3261                                 "Couldn't close port %d for agents\n", i);
3262                 if (ib_mad_port_close(device, i))
3263                         dev_err(&device->dev, "Couldn't close port %d\n", i);
3264         }
3265 }
3266
3267 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3268 {
3269         int i;
3270
3271         for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3272                 if (!rdma_cap_ib_mad(device, i))
3273                         continue;
3274
3275                 if (ib_agent_port_close(device, i))
3276                         dev_err(&device->dev,
3277                                 "Couldn't close port %d for agents\n", i);
3278                 if (ib_mad_port_close(device, i))
3279                         dev_err(&device->dev, "Couldn't close port %d\n", i);
3280         }
3281 }
3282
3283 static struct ib_client mad_client = {
3284         .name   = "mad",
3285         .add = ib_mad_init_device,
3286         .remove = ib_mad_remove_device
3287 };
3288
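/*
 * Clamp the module parameters to sane bounds before registering with
 * the IB core.
 */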
3289 int ib_mad_init(void)
3290 {
3291         mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3292         mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3293
3294         mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3295         mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3296
3297         INIT_LIST_HEAD(&ib_mad_port_list);
3298
3299         if (ib_register_client(&mad_client)) {
3300                 pr_err("Couldn't register ib_mad client\n");
3301                 return -EINVAL;
3302         }
3303
3304         return 0;
3305 }
3306
3307 void ib_mad_cleanup(void)
3308 {
3309         ib_unregister_client(&mad_client);
3310 }